示例#1
0
    def set_values(self):
        """
        setting columns into the widget
        """

        self.clear()

        try:
            layers = script.vector_db(map=self.code_dict['input'])
            layer = self.get_layer()

            if layer == -1:
                for layer in layers.keys():
                    self.get_columns(layers, layer)
            else:
                self.get_columns(layers, layer)
        except:
            try:
                layers = script.vector_db(map=self.code_dict['map'])
                layer = self.get_layer()

                if layer == -1:
                    for layer in layers.keys():
                        self.get_columns(layers, layer)
                else:
                    self.get_columns(layers, layer)
            except:
                self.addItem('')
示例#2
0
def sqlTbl(name,columns):
    '''Create a new empty table in the same sqlite.db as stations and connect.'''
    db = grass.vector_db(options['stationsvect'])
    # check if table already linked to this vector
    if name in [db[l]['name'] for l in db]:
        grass.warning('Table %s already attached to %s.' %(name,options['stationsvect']))
        return None        
    # connect
    try: con = sql.connect(db[1]['database'])
    except:
        grass.warning('''Cant connect to sqlite database, make sure %s is connected
        to a sqlite database on layer 1.''' %options['stationsvect'])
        return None
    # sql connection
    cur = con.cursor()
    # create column type
    cols = []
    for c in columns:
        if 'i' in c[1]: typ='INT'
        elif 'f' in c[1]: typ='DOUBLE'
        elif 's' in c[1]: typ='VARCHAR(%s)' %abs(int(c[1][:-1]))
        else: raise ValueError('Dont know how to convert %s for table %s'%(c,name))
        cols += [c[0]+' '+typ]
    # Create table
    stm = 'CREATE TABLE IF NOT EXISTS %s (%s)' %(name,', '.join(cols))
    cur.execute(stm)
    con.commit()
    con.close()
    # attach to stations
    grass.run_command('v.db.connect',map=options['stationsvect'],
                      table=name, key=columns[0][0], layer=max(db)+1)
                      
    return
def get_size(vector):
    tmpvector = 'tmp_getsize_%s' % str(os.getpid())
    rm_vectors.append(tmpvector)
    grass.run_command('g.copy',
                      vector="%s,%s" % (vector, tmpvector),
                      overwrite=True,
                      quiet=True)
    if len(grass.vector_db(tmpvector)) == 0:
        grass.run_command('v.db.addtable', map=tmpvector, quiet=True)
    grass.run_command('v.db.addcolumn',
                      map=tmpvector,
                      columns="tmparea DOUBLE PRECISION",
                      quiet=True)
    grass.run_command('v.to.db',
                      map=tmpvector,
                      columns='tmparea',
                      option='area',
                      units='meters',
                      quiet=True,
                      overwrite=True)
    sizeselected = grass.parse_command('v.db.select', map=tmpvector, flags="v")
    sizesstr = [
        x.split('|')[1:] for x in sizeselected if x.startswith('tmparea|')
    ][0]
    sizes = [float(x) for x in sizesstr]
    return sum(sizes)
示例#4
0
def get_size(vector):
    tmpvector = "tmp_getsize_%s" % str(os.getpid())
    rm_vectors.append(tmpvector)
    grass.run_command("g.copy",
                      vector="%s,%s" % (vector, tmpvector),
                      overwrite=True,
                      quiet=True)
    if len(grass.vector_db(tmpvector)) == 0:
        grass.run_command("v.db.addtable", map=tmpvector, quiet=True)
    grass.run_command("v.db.addcolumn",
                      map=tmpvector,
                      columns="tmparea DOUBLE PRECISION",
                      quiet=True)
    grass.run_command(
        "v.to.db",
        map=tmpvector,
        columns="tmparea",
        option="area",
        units="meters",
        quiet=True,
        overwrite=True,
    )
    sizeselected = grass.parse_command("v.db.select", map=tmpvector, flags="v")
    sizesstr = [
        x.split("|")[1:] for x in sizeselected if x.startswith("tmparea|")
    ][0]
    sizes = [float(x) for x in sizesstr]
    return sum(sizes)
示例#5
0
def main():
    global tmp
    tmp = grass.tempfile()

    vector = options['map']
    layer = options['layer']
    column = options['column']
    where = options['where']
    perc = options['percentile']
    extend = flags['e']
    shellstyle = flags['g']

    
    fi = grass.vector_db(vector, stderr = nuldev)[int(layer)]
    table = fi['table']
    database = fi['database']
    driver = fi['driver']
    
    passflags = None
    if flags['e']:
	passflags = 'e'
    if flags['g']:
	if not passflags:
	    passflags = 'g'
	else:
	    passflags = passflags + 'g'
    
    grass.run_command('db.univar', table = table, column = column, 
                      database = database, driver = driver,
		      perc = perc, where = where, flags = passflags)
示例#6
0
def main():
    global tmp
    tmp = grass.tempfile()

    vector = options['map']
    layer = options['layer']
    column = options['column']
    where = options['where']
    perc = options['percentile']
    extend = flags['e']
    shellstyle = flags['g']

    fi = grass.vector_db(vector, stderr=nuldev)[int(layer)]
    table = fi['table']
    database = fi['database']
    driver = fi['driver']

    passflags = None
    if flags['e']:
        passflags = 'e'
    if flags['g']:
        if not passflags:
            passflags = 'g'
        else:
            passflags = passflags + 'g'

    grass.run_command('db.univar',
                      table=table,
                      column=column,
                      database=database,
                      driver=driver,
                      perc=perc,
                      where=where,
                      flags=passflags)
示例#7
0
def main():
    vector = options['map']
    layer = options['layer']
    column = options['column']
    value = options['value']
    qcolumn = options['qcolumn']
    where = options['where']

    mapset = grass.gisenv()['MAPSET']

    # does map exist in CURRENT mapset?
    if not grass.find_file(vector, element='vector', mapset=mapset)['file']:
        grass.fatal(_("Vector map <%s> not found in current mapset") % vector)

    try:
        f = grass.vector_db(vector)[int(layer)]
    except KeyError:
        grass.fatal(
            _('There is no table connected to this map. Run v.db.connect or v.db.addtable first.'
              ))

    table = f['table']
    database = f['database']
    driver = f['driver']

    # checking column types
    try:
        coltype = grass.vector_columns(vector, layer)[column]['type']
    except KeyError:
        grass.fatal(_('Column <%s> not found') % column)

    if qcolumn:
        if value:
            grass.fatal(_('<value> and <qcolumn> are mutually exclusive'))
        # special case: we copy from another column
        value = qcolumn
    else:
        if not value:
            grass.fatal(_('Either <value> or <qcolumn> must be given'))
        # we insert a value
        if coltype.upper() not in ["INTEGER", "DOUBLE PRECISION"]:
            value = "'%s'" % value

    cmd = "UPDATE %s SET %s=%s" % (table, column, value)
    if where:
        cmd += " WHERE " + where

    grass.verbose("SQL: \"%s\"" % cmd)

    grass.write_command('db.execute',
                        input='-',
                        database=database,
                        driver=driver,
                        stdin=cmd)

    # write cmd history:
    grass.vector_history(vector)

    return 0
示例#8
0
def checkDbConnection(mapName):
    """! Checks if vector map has an attribute table.

    \todo check layer
    """
    ret = grass.vector_db(mapName)
    if not ret:
        grass.fatal(_("Vector map <%s> has no attribute table") % mapName)
示例#9
0
def checkDbConnection(mapName):
    """! Checks if vector map has an attribute table.

    \todo check layer
    """
    ret = grass.vector_db(mapName)
    if not ret:
        grass.fatal(_("Vector map <%s> has no attribute table") % mapName)
    def _CheckDBConnection(self):
        """Check DB connection"""
        nuldev = file(os.devnull, 'w+')
        # if map is not defined (happens with vnet initialization) or it doesn't exist
        try:
            self.layers = grass.vector_db(map=self.map, stderr=nuldev)
        except CalledModuleError:
            return False
        finally:  # always close nuldev
            nuldev.close()

        return bool(len(self.layers.keys()) > 0)
示例#11
0
def main():
    vector = options['map']
    layer = options['layer']
    column = options['column']
    value = options['value']
    qcolumn = options['qcolumn']
    where = options['where']

    mapset = grass.gisenv()['MAPSET']

    # does map exist in CURRENT mapset?
    if not grass.find_file(vector, element = 'vector', mapset = mapset)['file']:
	grass.fatal(_("Vector map <%s> not found in current mapset") % vector)

    try:
        f = grass.vector_db(vector)[int(layer)]
    except KeyError:
	grass.fatal(_('There is no table connected to this map. Run v.db.connect or v.db.addtable first.'))

    table = f['table']
    database = f['database']
    driver = f['driver']

    # checking column types
    try:
        coltype = grass.vector_columns(vector, layer)[column]['type']
    except KeyError:
	grass.fatal(_('Column <%s> not found') % column)

    if qcolumn:
	if value:
	    grass.fatal(_('<value> and <qcolumn> are mutually exclusive'))
	# special case: we copy from another column
	value = qcolumn
    else:
	if not value:
	    grass.fatal(_('Either <value> or <qcolumn> must be given'))
	# we insert a value
	if coltype.upper() not in ["INTEGER", "DOUBLE PRECISION"]:
	    value = "'%s'" % value

    cmd = "UPDATE %s SET %s=%s" % (table, column, value)
    if where:
	cmd += " WHERE " + where

    grass.verbose("SQL: \"%s\"" % cmd)

    grass.write_command('db.execute', input = '-', database = database, driver = driver, stdin = cmd)

    # write cmd history:
    grass.vector_history(vector)

    return 0
    def _CheckDBConnection(self):
        """Check DB connection"""
        nuldev = file(os.devnull, 'w+')
        # if map is not defined (happens with vnet initialization) or it doesn't exist
        try:
            self.layers = grass.vector_db(map=self.map, stderr=nuldev)
        except CalledModuleError:
            return False
        finally:  # always close nuldev
            nuldev.close()

        return bool(len(self.layers.keys()) > 0)
示例#13
0
    def get_layers(self):
        """
        load layers (based on map in input or map widget)
        """

        self.clear()

        if self.gtask['element'] == 'layer_all':
            self.addItem('-1')

        try:
            layers = script.vector_db(map=self.code_dict['input'])
            for layer in layers:
                self.addItem(str(layer))
        except:
            try:
                layers = script.vector_db(map=self.code_dict['map'])
                for layer in layers:
                    self.addItem(str(layer))
            except:
                if self.count() == 0:
                    self.addItem('')
示例#14
0
def main():
    map = options['map']
    layer = options['layer']
    columns = options['columns']
    columns = [col.strip() for col in columns.split(',')]

    # does map exist in CURRENT mapset?
    mapset = grass.gisenv()['MAPSET']
    exists = bool(
        grass.find_file(map, element='vector', mapset=mapset)['file'])

    if not exists:
        grass.fatal(_("Vector map <%s> not found in current mapset") % map)

    try:
        f = grass.vector_db(map)[int(layer)]
    except KeyError:
        grass.fatal(
            _("There is no table connected to this map. Run v.db.connect or v.db.addtable first."
              ))

    table = f['table']
    database = f['database']
    driver = f['driver']
    column_existing = grass.vector_columns(map, int(layer)).keys()

    for col in columns:
        if not col:
            grass.fatal(
                _("There is an empty column. Did you leave a trailing comma?"))
        col_name = col.split(' ')[0].strip()
        if col_name in column_existing:
            grass.error(
                _("Column <%s> is already in the table. Skipping.") % col_name)
            continue
        grass.verbose(_("Adding column <%s> to the table") % col_name)
        p = grass.feed_command('db.execute',
                               input='-',
                               database=database,
                               driver=driver)
        res = "ALTER TABLE {} ADD COLUMN {}".format(table, col)
        p.stdin.write(encode(res))
        grass.debug(res)
        p.stdin.close()
        if p.wait() != 0:
            grass.fatal(_("Unable to add column <%s>.") % col)

    # write cmd history:
    grass.vector_history(map)
示例#15
0
def main():
    table = options['table']
    force = flags['f']

    # check if DB parameters are set, and if not set them.
    grass.run_command('db.connect', flags = 'c')

    kv = grass.db_connection()
    database = kv['database']
    driver = kv['driver']
    # schema needed for PG?

    if force:
	grass.message(_("Forcing ..."))

    # check if table exists
    nuldev = file(os.devnull, 'w')
    if not grass.db_table_exist(table, stdout = nuldev, stderr = nuldev):
	grass.fatal(_("Table <%s> not found in current mapset") % table)

    # check if table is used somewhere (connected to vector map)
    used = []
    vects = grass.list_strings('vect')
    for vect in vects:
	for f in grass.vector_db(vect, stderr = nuldev).itervalues():
	    if not f:
		continue
	    if f['table'] == table:
		used.append(vect)
		break
    if used:
	grass.warning(_("Deleting table <%s> which is attached to following map(s):") % table)
	for vect in used:
	    grass.message(vect)

    if not force:
	grass.message(_("The table <%s> would be deleted.") % table)
	grass.message("")
	grass.message(_("You must use the force flag to actually remove it. Exiting."))
	sys.exit(0)

    p = grass.feed_command('db.execute', input = '-', database = database, driver = driver)
    p.stdin.write("DROP TABLE " + table)
    p.stdin.close()
    p.wait()
    if p.returncode != 0:
  	grass.fatal(_("Cannot continue (problem deleting table)."))
示例#16
0
def main():
    global tmp
    tmp = gscript.tempfile()

    vector = options["map"]
    layer = options["layer"]
    column = options["column"]
    where = options["where"]
    perc = options["percentile"]

    if not gscript.find_file(vector, element="vector")["file"]:
        gscript.fatal(_("Vector map <%s> not found") % vector)

    try:
        fi = gscript.vector_db(vector, stderr=nuldev)[int(layer)]
    except KeyError:
        gscript.fatal(_("No attribute table linked to layer <%s>") % layer)

    table = fi["table"]
    database = fi["database"]
    driver = fi["driver"]

    passflags = None
    if flags["e"]:
        passflags = "e"
    if flags["g"]:
        if not passflags:
            passflags = "g"
        else:
            passflags = passflags + "g"

    try:
        gscript.run_command(
            "db.univar",
            table=table,
            column=column,
            database=database,
            driver=driver,
            perc=perc,
            where=where,
            flags=passflags,
        )
    except CalledModuleError:
        sys.exit(1)
示例#17
0
def main():
    global tmp
    tmp = grass.tempfile()

    vector = options['map']
    layer = options['layer']
    column = options['column']
    where = options['where']
    perc = options['percentile']
    extend = flags['e']
    shellstyle = flags['g']

    if not grass.find_file(vector, element='vector')['file']:
        grass.fatal(_("Vector map <%s> not found") % vector)

    try:
        fi = grass.vector_db(vector, stderr=nuldev)[int(layer)]
    except KeyError:
        grass.fatal(_("No attribute table linked to layer <%s>") % layer)

    table = fi['table']
    database = fi['database']
    driver = fi['driver']

    passflags = None
    if flags['e']:
        passflags = 'e'
    if flags['g']:
        if not passflags:
            passflags = 'g'
        else:
            passflags = passflags + 'g'

    try:
        grass.run_command('db.univar',
                          table=table,
                          column=column,
                          database=database,
                          driver=driver,
                          perc=perc,
                          where=where,
                          flags=passflags)
    except CalledModuleError:
        sys.exit(1)
示例#18
0
def main():
    old_database = options['old_database']
    new_database = options['new_database']
    old_schema = options['old_schema']
    new_schema = options['new_schema']

    mapset = grass.gisenv()['MAPSET']

    nuldev = file(os.devnull, 'w')

    for vect in grass.list_grouped('vect')[mapset]:
	vect = "%s@%s" % (vect, mapset)
	grass.message(_("Reconnecting vector <%s>") % vect)
	for f in grass.vector_db(map, stderr = nuldev).itervalues():
	    layer = f['layer']
	    schema_table = f['table']
	    key = f['key']
	    database = f['database']
	    driver = f['driver']
	    if '.' in schema_table:
		st = schema_table.split('.', 1)
		schema = st[0]
		table = st[1]
	    else:
		schema = ''
		table = schema_table

	    if new_schema:
		new_schema_table = "%s.%s" % (new_schema, table)
	    else:
		new_schema_table = table

	    grass.message(_("SCHEMA = %s TABLE = %s NEW_SCHEMA_TABLE = %s") % (schema, table, new_schema_table))
	    if database == old_database and schema == old_schema:
		grass.message(_("Reconnecting layer ") + layer)
		grass.message(_("v.db.connect -o map=%s layer=%s driver=%s database=%s table=%s key=%s") %
			      (vect, layer, driver, new_database, new_schema_table, key))
		grass.run_command('v.db.connect', flags = 'o', map = vect,
				  layer = layer, driver = driver, database = new_database,
				  table = new_schema_table, key = key)
	    else:
		grass.message(_("Layer <%s> will not be reconnected, database or schema do not match.") % layer)
示例#19
0
def main():
    map = options['map']
    layer = options['layer']
    columns = options['columns']
    columns = [col.strip() for col in columns.split(',')]

    # does map exist in CURRENT mapset?
    mapset = grass.gisenv()['MAPSET']
    exists = bool(grass.find_file(map, element='vector', mapset=mapset)['file'])

    if not exists:
        grass.fatal(_("Vector map <%s> not found in current mapset") % map)

    try:
        f = grass.vector_db(map)[int(layer)]
    except KeyError:
        grass.fatal(
            _("There is no table connected to this map. Run v.db.connect or v.db.addtable first."))

    table = f['table']
    database = f['database']
    driver = f['driver']
    column_existing = grass.vector_columns(map, int(layer)).keys()

    for col in columns:
        if not col:
            grass.fatal(_("There is an empty column. Did you leave a trailing comma?"))
        col_name = col.split(' ')[0].strip()
        if col_name in column_existing:
            grass.error(_("Column <%s> is already in the table. Skipping.") % col_name)
            continue
        grass.verbose(_("Adding column <%s> to the table") % col_name)
        p = grass.feed_command('db.execute', input='-', database=database, driver=driver)
        p.stdin.write("ALTER TABLE %s ADD COLUMN %s" % (table, col))
        grass.debug("ALTER TABLE %s ADD COLUMN %s" % (table, col))
        p.stdin.close()
        if p.wait() != 0:
            grass.fatal(_("Unable to add column <%s>.") % col)

    # write cmd history:
    grass.vector_history(map)
示例#20
0
def main():
    global tmp
    tmp = grass.tempfile()

    vector = options['map']
    layer = options['layer']
    column = options['column']
    where = options['where']
    perc = options['percentile']
    extend = flags['e']
    shellstyle = flags['g']

    if not grass.find_file(vector, element='vector')['file']:
        grass.fatal(_("Vector map <%s> not found") % vector)

    try:
        fi = grass.vector_db(vector, stderr = nuldev)[int(layer)]
    except KeyError:
        grass.fatal(_("No attribute table linked to layer <%s>") % layer)
                
    table = fi['table']
    database = fi['database']
    driver = fi['driver']
    
    passflags = None
    if flags['e']:
	passflags = 'e'
    if flags['g']:
	if not passflags:
	    passflags = 'g'
	else:
	    passflags = passflags + 'g'

    try:
        grass.run_command('db.univar', table = table, column = column, 
                          database = database, driver = driver,
                          perc = perc, where = where, flags = passflags)
    except CalledModuleError:
        sys.exit(1)
示例#21
0
def main():
    vector = options["map"]
    table = options["table"]
    layer = options["layer"]
    columns = options["columns"]
    key = options["key"]

    # does map exist in CURRENT mapset?
    mapset = grass.gisenv()["MAPSET"]
    if not grass.find_file(vector, element="vector", mapset=mapset)["file"]:
        grass.fatal(_("Vector map <%s> not found in current mapset") % vector)

    map_name = vector.split("@")[0]

    if not table:
        if layer == "1":
            grass.verbose(
                _("Using vector map name as table name: <%s>") % map_name)
            table = map_name
        else:
            # to avoid tables with identical names on higher layers
            table = "%s_%s" % (map_name, layer)
            grass.verbose(
                _("Using vector map name extended by layer number as table name: <%s>"
                  ) % table)
    else:
        grass.verbose(_("Using user specified table name: %s") % table)

    # check if DB parameters are set, and if not set them.
    grass.run_command("db.connect", flags="c", quiet=True)
    grass.verbose(
        _("Creating new DB connection based on default mapset settings..."))
    kv = grass.db_connection()
    database = kv["database"]
    driver = kv["driver"]
    schema = kv["schema"]

    database2 = database.replace("$MAP/", map_name + "/")

    # maybe there is already a table linked to the selected layer?
    nuldev = open(os.devnull, "w")
    try:
        grass.vector_db(map_name, stderr=nuldev)[int(layer)]
        grass.fatal(_("There is already a table linked to layer <%s>") % layer)
    except KeyError:
        pass

    # maybe there is already a table with that name?
    tables = grass.read_command("db.tables",
                                flags="p",
                                database=database2,
                                driver=driver,
                                stderr=nuldev)
    tables = decode(tables)

    if table not in tables.splitlines():
        colnames = []
        column_def = []
        if columns:
            column_def = []
            for x in " ".join(columns.split()).split(","):
                colname = x.lower().split()[0]
                if colname in colnames:
                    grass.fatal(
                        _("Duplicate column name '%s' not allowed") % colname)
                colnames.append(colname)
                column_def.append(x)

        # if not existing, create it:
        if key not in colnames:
            column_def.insert(0, "%s integer" % key)
        column_def = ",".join(column_def)

        grass.verbose(_("Creating table with columns (%s)...") % column_def)

        sql = "CREATE TABLE %s (%s)" % (table, column_def)
        try:
            grass.run_command("db.execute",
                              database=database2,
                              driver=driver,
                              sql=sql)
        except CalledModuleError:
            grass.fatal(_("Unable to create table <%s>") % table)

    # connect the map to the DB:
    if schema:
        table = "{schema}.{table}".format(schema=schema, table=table)
    grass.verbose(_("Connecting new table to vector map <%s>...") % map_name)
    grass.run_command(
        "v.db.connect",
        quiet=True,
        map=map_name,
        database=database,
        driver=driver,
        layer=layer,
        table=table,
        key=key,
    )

    # finally we have to add cats into the attribute DB to make
    # modules such as v.what.rast happy: (creates new row for each
    # vector line):
    try:
        grass.run_command(
            "v.to.db",
            overwrite=True,
            map=map_name,
            layer=layer,
            option="cat",
            column=key,
            qlayer=layer,
        )
    except CalledModuleError:
        # remove link
        grass.run_command("v.db.connect",
                          quiet=True,
                          flags="d",
                          map=map_name,
                          layer=layer)
        return 1

    grass.verbose(_("Current attribute table links:"))
    if grass.verbosity() > 2:
        grass.run_command("v.db.connect", flags="p", map=map_name)

    # write cmd history:
    grass.vector_history(map_name)

    return 0
示例#22
0
def main():
    vector = options['map']
    table = options['table']
    layer = options['layer']
    columns = options['columns']
    key = options['key']
    
    # does map exist in CURRENT mapset?
    mapset = grass.gisenv()['MAPSET']
    if not grass.find_file(vector, element = 'vector', mapset = mapset)['file']:
        grass.fatal(_("Vector map <%s> not found in current mapset") % vector)
    
    map_name = vector.split('@')[0]
    
    if not table:
        if layer == '1':
            grass.verbose(_("Using vector map name as table name: <%s>") % map_name)
            table = map_name
        else:
            # to avoid tables with identical names on higher layers
            table = "%s_%s" % (map_name, layer)
            grass.verbose(_("Using vector map name extended by layer number as table name: <%s>") % table)
    else:
        grass.verbose(_("Using user specified table name: %s") % table)
    
    # check if DB parameters are set, and if not set them.
    grass.run_command('db.connect', flags = 'c')
    grass.verbose(_("Creating new DB connection based on default mapset settings..."))
    kv = grass.db_connection()
    database = kv['database']
    driver = kv['driver']
    schema = kv['schema']
    
    # maybe there is already a table linked to the selected layer?
    nuldev = file(os.devnull, 'w')
    try:
        grass.vector_db(map_name, stderr = nuldev)[int(layer)]
        grass.fatal(_("There is already a table linked to layer <%s>") % layer)
    except KeyError:
        pass
    
    # maybe there is already a table with that name?
    tables = grass.read_command('db.tables', flags = 'p', database = database, driver = driver,
                                stderr = nuldev)
    
    if not table in tables.splitlines():
        if columns:
            column_def = map(lambda x: x.strip().lower(), columns.strip().split(','))
        else:
            column_def = []
        
        # if not existing, create it:
        column_def_key = "%s integer" % key
        if column_def_key not in column_def:
            column_def.insert(0, column_def_key)
        column_def = ','.join(column_def)
        
        grass.verbose(_("Creating table with columns (%s)...") % column_def)
        
        sql = "CREATE TABLE %s (%s)" % (table, column_def)
        try:
            grass.run_command('db.execute',
                              database=database, driver=driver, sql=sql)
        except CalledModuleError:
            grass.fatal(_("Unable to create table <%s>") % table)

    # connect the map to the DB:
    if schema:
        table = '{schema}.{table}'.format(schema=schema, table=table)
    grass.run_command('v.db.connect', quiet = True,
                      map = map_name, database = database, driver = driver,
                      layer = layer, table = table, key = key)
    
    # finally we have to add cats into the attribute DB to make modules such as v.what.rast happy:
    # (creates new row for each vector line):
    grass.run_command('v.to.db', map = map_name, layer = layer,
                      option = 'cat', column = key, qlayer = layer)
    
    grass.verbose(_("Current attribute table links:"))
    if grass.verbosity() > 2:
        grass.run_command('v.db.connect', flags = 'p', map = map_name)
    
    # write cmd history:
    grass.vector_history(map_name)
    
    return 0
示例#23
0
def main():
    global tmp, sqltmp, tmpname, nuldev, vector, rastertmp
    rastertmp = False
    # setup temporary files
    tmp = grass.tempfile()
    sqltmp = tmp + ".sql"
    # we need a random name
    tmpname = grass.basename(tmp)

    nuldev = open(os.devnull, 'w')

    rasters = options['raster'].split(',')
    colprefixes = options['column_prefix'].split(',')
    vector = options['map']
    layer = options['layer']
    vtypes = options['type']
    where = options['where']
    percentile = options['percentile']
    basecols = options['method'].split(',')

    ### setup enviro vars ###
    env = grass.gisenv()
    mapset = env['MAPSET']

    vs = vector.split('@')
    if len(vs) > 1:
        vect_mapset = vs[1]
    else:
        vect_mapset = mapset

    # does map exist in CURRENT mapset?
    if vect_mapset != mapset or not grass.find_file(vector, 'vector',
                                                    mapset)['file']:
        grass.fatal(_("Vector map <%s> not found in current mapset") % vector)

    # check if DBF driver used, in this case cut to 10 chars col names:
    try:
        fi = grass.vector_db(map=vector)[int(layer)]
    except KeyError:
        grass.fatal(
            _('There is no table connected to this map. Run v.db.connect or v.db.addtable first.'
              ))
    # we need this for non-DBF driver:
    dbfdriver = fi['driver'] == 'dbf'

    # colprefix for every raster map?
    if len(colprefixes) != len(rasters):
        grass.fatal(
            _("Number of raster maps ({0}) different from \
                      number of column prefixes ({1})".format(
                len(rasters), len(colprefixes))))

    vector = vs[0]

    rastertmp = "%s_%s" % (vector, tmpname)

    for raster in rasters:
        # check the input raster map
        if not grass.find_file(raster, 'cell')['file']:
            grass.fatal(_("Raster map <%s> not found") % raster)

    # save current settings:
    grass.use_temp_region()

    # Temporarily aligning region resolution to $RASTER resolution
    # keep boundary settings
    grass.run_command('g.region', align=rasters[0])

    # check if DBF driver used, in this case cut to 10 chars col names:
    try:
        fi = grass.vector_db(map=vector)[int(layer)]
    except KeyError:
        grass.fatal(
            _('There is no table connected to this map. '
              'Run v.db.connect or v.db.addtable first.'))
    # we need this for non-DBF driver:
    dbfdriver = fi['driver'] == 'dbf'

    # Find out which table is linked to the vector map on the given layer
    if not fi['table']:
        grass.fatal(
            _('There is no table connected to this map. '
              'Run v.db.connect or v.db.addtable first.'))

    # prepare base raster for zonal statistics
    prepare_base_raster(vector, layer, rastertmp, vtypes, where)

    # get number of raster categories to be processed
    number = get_nr_of_categories(vector, layer, rasters, rastertmp,
                                  percentile, colprefixes, basecols, dbfdriver,
                                  flags['c'])

    # calculate statistics:
    grass.message(_("Processing input data (%d categories)...") % number)

    for i in range(len(rasters)):
        raster = rasters[i]

        colprefix, variables_dbf, variables, colnames, extstat = set_up_columns(
            vector, layer, percentile, colprefixes[i], basecols, dbfdriver,
            flags['c'])

        # get rid of any earlier attempts
        grass.try_remove(sqltmp)

        # do the stats
        perform_stats(raster, percentile, fi, dbfdriver, colprefix,
                      variables_dbf, variables, colnames, extstat)

        grass.message(_("Updating the database ..."))
        exitcode = 0
        try:
            grass.run_command('db.execute',
                              input=sqltmp,
                              database=fi['database'],
                              driver=fi['driver'])
            grass.verbose(
                (_("Statistics calculated from raster map <{raster}>"
                   " and uploaded to attribute table"
                   " of vector map <{vector}>.").format(raster=raster,
                                                        vector=vector)))
        except CalledModuleError:
            grass.warning(
                _("Failed to upload statistics to attribute table of vector map <%s>."
                  ) % vector)
            exitcode = 1

            sys.exit(exitcode)
示例#24
0
def main():
    global tmp, prefix
    tmp = grass.tempfile()
    prefix = "concave_hull_tmp_%d" % os.getpid()

    input = options["input"]
    output = options["output"]
    perc = options["threshold"]

    perc = float(perc) + 90

    delaunay = prefix + "_delaunay"

    grass.message(_("Delaunay triangulation..."))
    grass.run_command("v.delaunay", input=input, output=delaunay, quiet=True)

    out_points = prefix + "_delaunay_pnts"
    out_lines_nocat = prefix + "_delaunay_lines_nocat"
    out_lines = prefix + "_delaunay_lines"
    out_lines_tmp = prefix + "_delaunay_lines_tmp"

    grass.message(_("Geometry conversion..."))
    grass.run_command(
        "v.extract",
        input=delaunay,
        output=out_lines_tmp,
        type="boundary",
        layer="-1",
        quiet=True,
    )
    grass.run_command(
        "v.type",
        input=out_lines_tmp,
        output=out_lines_nocat,
        from_type="boundary",
        to_type="line",
        quiet=True,
    )
    grass.run_command(
        "v.type",
        input=delaunay,
        output=out_points,
        from_type="centroid",
        to_type="point",
        quiet=True,
    )

    grass.run_command(
        "v.category",
        input=out_lines_nocat,
        output=out_lines,
        op="add",
        type="line",
        quiet=True,
    )
    grass.run_command(
        "v.db.addtable",
        map=out_lines,
        col="cat integer,length double precision",
        quiet=True,
    )

    grass.message(_("Evaluating threshold..."))
    grass.run_command(
        "v.to.db", map=out_lines, type="line", op="length", col="length", quiet=True
    )

    db_info = grass.vector_db(map=out_lines, layer="1")[1]
    table = db_info["table"]
    database = db_info["database"]
    driver = db_info["driver"]
    sql = "SELECT length FROM %s" % (table)
    tmpf = open(tmp, "w")
    grass.run_command(
        "db.select",
        flags="c",
        table=table,
        database=database,
        driver=driver,
        sql=sql,
        stdout=tmpf,
    )
    tmpf.close()

    # check if result is empty
    tmpf = open(tmp, "r")
    if tmpf.read(1) == "":
        grass.fatal(_("Table <%s> contains no data.") % table)
    tmpf.close()

    N = 0
    tmpf = open(tmp)
    for line in tmpf:
        N += 1
    tmpf.close()

    max_length = 0.0
    sortfile(tmp, tmp + ".sort")
    ppos = round(N * perc / 100)

    perc_orig = perc
    while ppos >= N and perc >= 90:
        perc -= 1
        ppos = round(N * perc / 100)

    if perc == 89:
        grass.fatal(_("Cannot calculate hull. Too few points."))

    if perc_orig > perc:
        thresh = int(perc) - 90
        grass.warning(_("Threshold reduced to %d to calculate hull" % thresh))

    inf = open(tmp + ".sort", "r")
    l = 0
    for line in inf:
        if l == ppos:
            max_length = float(line.rstrip("\r\n"))
            break
        l += 1
    inf.close()

    grass.message(_("Feature selection..."))
    lines_concave = prefix + "_delaunay_lines_select"
    lines_concave_nocat = prefix + "_delaunay_lines_select_nocat"
    grass.run_command(
        "v.extract",
        input=out_lines,
        output=lines_concave,
        type="line",
        where="length < %f" % max_length,
        quiet=True,
    )

    grass.run_command(
        "v.category",
        input=lines_concave,
        output=lines_concave_nocat,
        type="line",
        op="del",
        cat="-1",
        quiet=True,
    )

    borders_concave = prefix + "_delaunay_borders_select"
    grass.run_command(
        "v.type",
        input=lines_concave_nocat,
        output=borders_concave,
        from_type="line",
        to_type="boundary",
        quiet=True,
    )

    areas_concave = prefix + "_delaunay_areas_select"
    grass.run_command(
        "v.centroids", input=borders_concave, output=areas_concave, quiet=True
    )
    grass.run_command("v.db.droptable", map=areas_concave, flags="f", quiet=True)
    grass.run_command(
        "v.db.addtable", map=areas_concave, col="cat integer,count integer", quiet=True
    )

    grass.run_command(
        "v.vect.stats",
        points=out_points,
        areas=areas_concave,
        ccolumn="count",
        quiet=True,
    )

    areas_concave_extr = prefix + "_delaunay_areas_extract"

    grass.run_command(
        "v.extract",
        input=areas_concave,
        output=areas_concave_extr,
        type="area",
        where="count = 1",
        quiet=True,
    )

    grass.message(_("The following warnings can be ignored"), flag="i")
    grass.run_command(
        "v.dissolve",
        input=areas_concave_extr,
        output=output,
        col="count",
        layer="1",
        quiet=True,
    )
    grass.message(_("Concave hull successfully created"))
示例#25
0
def main():
    mapname = options['map']
    option = options['option']
    layer = options['layer']
    units = options['units']

    nuldev = file(os.devnull, 'w')

    if not grass.find_file(mapname, 'vector')['file']:
        grass.fatal(_("Vector map <%s> not found") % mapname)

    colnames = grass.vector_columns(mapname,
                                    layer,
                                    getDict=False,
                                    stderr=nuldev)
    if not colnames:
        colnames = ['cat']

    if option == 'coor':
        columns = ['dummy1', 'dummy2', 'dummy3']
        extracolnames = ['x', 'y', 'z']
    else:
        columns = ['dummy1']
        extracolnames = [option]

    if units in ['p', 'percent']:
        unitsp = 'meters'
    elif units:
        unitsp = units
    else:
        unitsp = None

    # NOTE: we suppress -1 cat and 0 cat
    if colnames:
        p = grass.pipe_command('v.db.select',
                               quiet=True,
                               flags='c',
                               map=mapname,
                               layer=layer)
        records1 = []
        for line in p.stdout:
            cols = line.rstrip('\r\n').split('|')
            if cols[0] == '0':
                continue
            records1.append([int(cols[0])] + cols[1:])
        p.wait()
        if p.returncode != 0:
            sys.exit(1)

        records1.sort()

        if len(records1) == 0:
            try:
                f = grass.vector_db(map=mapname)[int(layer)]
                grass.fatal(
                    _("There is a table connected to input vector map '%s', but"
                      "there are no categories present in the key column '%s'. Consider using"
                      "v.to.db to correct this.") % (mapname, f['key']))
            except KeyError:
                pass

        #fetch the requested attribute sorted by cat:
        p = grass.pipe_command('v.to.db',
                               flags='p',
                               quiet=True,
                               map=mapname,
                               option=option,
                               columns=columns,
                               layer=layer,
                               units=unitsp)
        records2 = []
        for line in p.stdout:
            fields = line.rstrip('\r\n').split('|')
            if fields[0] in ['cat', '-1', '0']:
                continue
            records2.append([int(fields[0])] + fields[1:-1] +
                            [float(fields[-1])])
        p.wait()
        records2.sort()

        #make pre-table
        # len(records1) may not be the same as len(records2) because
        # v.db.select can return attributes that are not linked to features.
        records3 = []
        for r2 in records2:
            records3.append(
                filter(lambda r1: r1[0] == r2[0], records1)[0] + r2[1:])
    else:
        records1 = []
        p = grass.pipe_command('v.category',
                               inp=mapname,
                               layer=layer,
                               option='print')
        for line in p.stdout:
            field = int(line.rstrip())
            if field > 0:
                records1.append(field)
        p.wait()
        records1.sort()
        records1 = uniq(records1)

        #make pre-table
        p = grass.pipe_command('v.to.db',
                               flags='p',
                               map=mapname,
                               option=option,
                               columns=columns,
                               layer=layer,
                               units=unitsp)
        records3 = []
        for line in p.stdout:
            fields = line.split('|')
            if fields[0] in ['cat', '-1', '0']:
                continue
            records3.append([int(fields[0])] + fields[1:])
        p.wait()
        records3.sort()

    # print table header
    sys.stdout.write('|'.join(colnames + extracolnames) + '\n')

    #make and print the table:
    numcols = len(colnames) + len(extracolnames)

    # calculate percents if requested
    if units != '' and units in ['p', 'percent']:
        # calculate total area value
        areatot = 0
        for r in records3:
            areatot += float(r[-1])

        # calculate area percentages
        records4 = [float(r[-1]) * 100 / areatot for r in records3]
        records3 = [r1 + [r4] for r1, r4 in zip(records1, records4)]

    # sort results
    if options['sort']:
        if options['sort'] == 'asc':
            records3.sort(key=lambda r: r[-1])
        else:
            records3.sort(key=lambda r: r[-1], reverse=True)

    for r in records3:
        sys.stdout.write('|'.join(map(str, r)) + '\n')
示例#26
0
def main():
    """Process command line parameters and update the table"""
    options, flags = gs.parser()

    vector = options["map"]
    layer = options["layer"]
    where = options["where"]
    column = options["column"]
    expression = options["expression"]
    condition = options["condition"]
    functions_file = options["functions"]

    # Map needs to be in the current mapset
    mapset = gs.gisenv()["MAPSET"]
    if not gs.find_file(vector, element="vector", mapset=mapset)["file"]:
        gs.fatal(
            _("Vector map <{vector}> does not exist or is not in the current mapset"
              "(<{mapset}>) and therefore it cannot be modified").format(
                  **locals()))

    # Map+layer needs to have a table connected
    try:
        # TODO: Support @OGR vector maps? Probably not supported by db.execute anyway.
        db_info = gs.vector_db(vector)[int(layer)]
    except KeyError:
        gs.fatal(
            _("There is no table connected to map <{vector}> (layer <{layer}>)."
              " Use v.db.connect or v.db.addtable to add it.").format(
                  **locals()))
    table = db_info["table"]
    database = db_info["database"]
    driver = db_info["driver"]
    columns = gs.vector_columns(vector, layer)

    # Check that column exists
    try:
        column_info = columns[column]
    except KeyError:
        gs.fatal(
            _("Column <{column}> not found. Use v.db.addcolumn to create it.").
            format(column=column))
    column_type = column_info["type"]

    # Check that optional function file exists
    if functions_file:
        if not os.access(functions_file, os.R_OK):
            gs.fatal(_("File <{file}> not found").format(file=functions_file))

    # Define Python functions
    # Here we need the full-deal eval and exec functions and can't sue less
    # general alternatives such as ast.literal_eval.
    def expression_function(**kwargs):
        return eval(expression, globals(), kwargs)  # pylint: disable=eval-used

    def condition_function(**kwargs):
        return eval(condition, globals(), kwargs)  # pylint: disable=eval-used

    # TODO: Add error handling for failed imports.
    if options["packages"]:
        packages = options["packages"].split(",")
        for package in packages:
            # pylint: disable=exec-used
            exec(f"import {package}", globals(), globals())
            if flags["s"]:
                exec(f"from {package} import *", globals(), globals())

    # TODO: Add error handling for invalid syntax.
    if functions_file:
        with open(functions_file) as file:
            exec(file.read(), globals(), globals())  # pylint: disable=exec-used

    # Get table contents
    if not where:
        # The condition needs to be None, an empty string is passed through.
        where = None
    if gs.version()["version"] < "7.9":
        sep = "|"  # Only one char sep for Python csv package.
        null = "NULL"
        csv_text = gs.read_command(
            "v.db.select",
            map=vector,
            layer=layer,
            separator=sep,
            null=null,
            where=where,
        )
        table_contents = csv_loads(csv_text, delimeter=sep, null=null)
    else:
        # TODO: XXX is a workaround for a bug in v.db.select -j
        json_text = gs.read_command("v.db.select",
                                    map=vector,
                                    layer=layer,
                                    flags="j",
                                    null="XXX",
                                    where=where)
        table_contents = json.loads(json_text)

    cmd = python_to_transaction(
        table=table,
        table_contents=table_contents,
        column=column,
        column_type=column_type,
        expression=expression,
        expression_function=expression_function,
        condition=condition,
        condition_function=condition_function,
        ensure_lowercase=not flags["u"],
    )

    # Messages
    if len(cmd) == 2:
        gs.message(
            "No rows to update. Try a different SQL where or Python condition."
        )
    elif len(cmd) > 2:
        # First and last statement
        gs.verbose(f'Using SQL: "{cmd[1]}...{cmd[-2]}"')

    # The newline is needed for successful execution/reading of many statements.
    # TODO: Add error handling when there is a syntax error due to wrongly
    # generated SQL statement and/or sanitize the value in update more.
    gs.write_command("db.execute",
                     input="-",
                     database=database,
                     driver=driver,
                     stdin="\n".join(cmd))

    gs.vector_history(vector)
示例#27
0
def main():
    vector = options['map']
    table = options['table']
    layer = options['layer']
    columns = options['columns']
    key = options['key']

    # does map exist in CURRENT mapset?
    mapset = grass.gisenv()['MAPSET']
    if not grass.find_file(vector, element='vector', mapset=mapset)['file']:
        grass.fatal(_("Vector map <%s> not found in current mapset") % vector)

    map_name = vector.split('@')[0]

    if not table:
        if layer == '1':
            grass.verbose(_("Using vector map name as table name: <%s>") % map_name)
            table = map_name
        else:
            # to avoid tables with identical names on higher layers
            table = "%s_%s" % (map_name, layer)
            grass.verbose(
                _("Using vector map name extended by layer number as table name: <%s>") %
                table)
    else:
        grass.verbose(_("Using user specified table name: %s") % table)

    # check if DB parameters are set, and if not set them.
    grass.run_command('db.connect', flags='c')
    grass.verbose(_("Creating new DB connection based on default mapset settings..."))
    kv = grass.db_connection()
    database = kv['database']
    driver = kv['driver']
    schema = kv['schema']

    # maybe there is already a table linked to the selected layer?
    nuldev = file(os.devnull, 'w')
    try:
        grass.vector_db(map_name, stderr=nuldev)[int(layer)]
        grass.fatal(_("There is already a table linked to layer <%s>") % layer)
    except KeyError:
        pass

    # maybe there is already a table with that name?
    tables = grass.read_command('db.tables', flags='p', database=database, driver=driver,
                                stderr=nuldev)

    if not table in tables.splitlines():
        if columns:
            column_def = [x.strip().lower() for x in columns.strip().split(',')]
        else:
            column_def = []

        # if not existing, create it:
        column_def_key = "%s integer" % key
        if column_def_key not in column_def:
            column_def.insert(0, column_def_key)
        column_def = ','.join(column_def)

        grass.verbose(_("Creating table with columns (%s)...") % column_def)

        sql = "CREATE TABLE %s (%s)" % (table, column_def)
        try:
            grass.run_command('db.execute',
                              database=database, driver=driver, sql=sql)
        except CalledModuleError:
            grass.fatal(_("Unable to create table <%s>") % table)

    # connect the map to the DB:
    if schema:
        table = '{schema}.{table}'.format(schema=schema, table=table)
    grass.run_command('v.db.connect', quiet=True,
                      map=map_name, database=database, driver=driver,
                      layer=layer, table=table, key=key)

    # finally we have to add cats into the attribute DB to make modules such as v.what.rast happy:
    # (creates new row for each vector line):
    grass.run_command('v.to.db', map=map_name, layer=layer,
                      option='cat', column=key, qlayer=layer)

    grass.verbose(_("Current attribute table links:"))
    if grass.verbosity() > 2:
        grass.run_command('v.db.connect', flags='p', map=map_name)

    # write cmd history:
    grass.vector_history(map_name)

    return 0
示例#28
0
def main():
    global tmp, sqltmp, tmpname, nuldev, vector, rastertmp
    rastertmp = False
    #### setup temporary files
    tmp = grass.tempfile()
    sqltmp = tmp + ".sql"
    # we need a random name
    tmpname = grass.basename(tmp)

    nuldev = file(os.devnull, 'w')

    raster = options['raster']
    colprefix = options['column_prefix']
    vector = options['map']
    layer = options['layer']
    percentile = options['percentile']
    basecols = options['method'].split(',')

    ### setup enviro vars ###
    env = grass.gisenv()
    mapset = env['MAPSET']

    vs = vector.split('@')
    if len(vs) > 1:
        vect_mapset = vs[1]
    else:
        vect_mapset = mapset

    # does map exist in CURRENT mapset?
    if vect_mapset != mapset or not grass.find_file(vector, 'vector', mapset)['file']:
        grass.fatal(_("Vector map <%s> not found in current mapset") % vector)

    vector = vs[0]

    rastertmp = "%s_%s" % (vector, tmpname)

    # check the input raster map
    if not grass.find_file(raster, 'cell')['file']:
        grass.fatal(_("Raster map <%s> not found") % raster)

    # save current settings:
    grass.use_temp_region()

    # Temporarily aligning region resolution to $RASTER resolution
    # keep boundary settings
    grass.run_command('g.region', align=raster)

    grass.message(_("Preprocessing input data..."))
    try:
        grass.run_command('v.to.rast', input=vector, layer=layer, output=rastertmp,
                          use='cat', quiet=True)
    except CalledModuleError:
        grass.fatal(_("An error occurred while converting vector to raster"))

    # dump cats to file to avoid "too many argument" problem:
    p = grass.pipe_command('r.category', map=rastertmp, sep=';', quiet=True)
    cats = []

    for line in p.stdout:
        cats.append(line.rstrip('\r\n').split(';')[0])
    p.wait()

    number = len(cats)
    if number < 1:
        grass.fatal(_("No categories found in raster map"))

    # check if DBF driver used, in this case cut to 10 chars col names:
    try:
        fi = grass.vector_db(map=vector)[int(layer)]
    except KeyError:
        grass.fatal(_('There is no table connected to this map. Run v.db.connect or v.db.addtable first.'))
    # we need this for non-DBF driver:
    dbfdriver = fi['driver'] == 'dbf'

    # Find out which table is linked to the vector map on the given layer
    if not fi['table']:
        grass.fatal(_('There is no table connected to this map. Run v.db.connect or v.db.addtable first.'))

    # replaced by user choiche
    #basecols = ['n', 'min', 'max', 'range', 'mean', 'stddev', 'variance', 'cf_var', 'sum']

    # we need at least three chars to distinguish [mea]n from [med]ian
    # so colprefix can't be longer than 6 chars with DBF driver
    if dbfdriver:
        colprefix = colprefix[:6]
        variables_dbf = {}

    # by default perccol variable is used only for "variables" variable
    perccol = "percentile"
    perc = None
    for b in basecols:
        if b.startswith('p'):
            perc = b
    if perc:
        # namespace is limited in DBF but the % value is important
        if dbfdriver:
            perccol = "per" + percentile
        else:
            perccol = "percentile_" + percentile
        percindex = basecols.index(perc)
        basecols[percindex] = perccol

    # dictionary with name of methods and position in "r.univar -gt"  output
    variables = {'number': 2, 'minimum': 4, 'maximum': 5, 'range': 6,
                 'average': 7, 'stddev': 9, 'variance': 10, 'coeff_var': 11,
                 'sum': 12, 'first_quartile': 14, 'median': 15,
                 'third_quartile': 16, perccol: 17}
    # this list is used to set the 'e' flag for r.univar
    extracols = ['first_quartile', 'median', 'third_quartile', perccol]
    addcols = []
    colnames = []
    extstat = ""
    for i in basecols:
        # this check the complete name of out input that should be truncated
        for k in variables.keys():
            if i in k:
                i = k
                break
        if i in extracols:
            extstat = 'e'
        # check if column already present
        currcolumn = ("%s_%s" % (colprefix, i))
        if dbfdriver:
            currcolumn = currcolumn[:10]
            variables_dbf[currcolumn.replace("%s_" % colprefix, '')] = i

        colnames.append(currcolumn)
        if currcolumn in grass.vector_columns(vector, layer).keys():
            if not flags['c']:
                grass.fatal((_("Cannot create column <%s> (already present). ") % currcolumn) +
                             _("Use -c flag to update values in this column."))
        else:
            if i == "n":
                coltype = "INTEGER"
            else:
                coltype = "DOUBLE PRECISION"
            addcols.append(currcolumn + ' ' + coltype)

    if addcols:
        grass.verbose(_("Adding columns '%s'") % addcols)
        try:
            grass.run_command('v.db.addcolumn', map=vector, columns=addcols,
                              layer=layer)
        except CalledModuleError:
            grass.fatal(_("Adding columns failed. Exiting."))

    # calculate statistics:
    grass.message(_("Processing input data (%d categories)...") % number)

    # get rid of any earlier attempts
    grass.try_remove(sqltmp)

    f = file(sqltmp, 'w')

    # do the stats
    p = grass.pipe_command('r.univar', flags='t' + extstat, map=raster,
                           zones=rastertmp, percentile=percentile, sep=';')

    first_line = 1

    f.write("{}\n".format(grass.db_begin_transaction(fi['driver'])))
    for line in p.stdout:
        if first_line:
            first_line = 0
            continue

        vars = line.rstrip('\r\n').split(';')

        f.write("UPDATE %s SET" % fi['table'])
        first_var = 1
        for colname in colnames:
            variable = colname.replace("%s_" % colprefix, '', 1)
            if dbfdriver:
                variable = variables_dbf[variable]
            i = variables[variable]
            value = vars[i]
            # convert nan, +nan, -nan, inf, +inf, -inf, Infinity, +Infinity,
            # -Infinity to NULL
            if value.lower().endswith('nan') or 'inf' in value.lower():
                value = 'NULL'
            if not first_var:
                f.write(" , ")
            else:
                first_var = 0
            f.write(" %s=%s" % (colname, value))

        f.write(" WHERE %s=%s;\n" % (fi['key'], vars[0]))
    f.write("{}\n".format(grass.db_commit_transaction(fi['driver'])))
    p.wait()
    f.close()

    grass.message(_("Updating the database ..."))
    exitcode = 0
    try:
        grass.run_command('db.execute', input=sqltmp,
                          database=fi['database'], driver=fi['driver'])
        grass.verbose((_("Statistics calculated from raster map <{raster}>"
                         " and uploaded to attribute table"
                         " of vector map <{vector}>."
                         ).format(raster=raster, vector=vector)))
    except CalledModuleError:
        grass.warning(_("Failed to upload statistics to attribute table of vector map <%s>.") % vector)
        exitcode = 1

    sys.exit(exitcode)
示例#29
0
def main():
    global tmp, prefix
    tmp = grass.tempfile()
    prefix = 'concave_hull_tmp_%d' % os.getpid()

    input = options['input']
    output = options['output']
    perc = options['threshold']

    perc = float(perc) + 90

    delaunay = prefix + '_delaunay'

    grass.message(_("Delaunay triangulation..."))
    grass.run_command('v.delaunay', input=input, output=delaunay, quiet=True)

    out_points = prefix + '_delaunay_pnts'
    out_lines_nocat = prefix + '_delaunay_lines_nocat'
    out_lines = prefix + '_delaunay_lines'
    out_lines_tmp = prefix + '_delaunay_lines_tmp'

    grass.message(_("Geometry conversion..."))
    grass.run_command('v.extract',
                      input=delaunay,
                      output=out_lines_tmp,
                      type='boundary',
                      layer='-1',
                      quiet=True)
    grass.run_command('v.type',
                      input=out_lines_tmp,
                      output=out_lines_nocat,
                      from_type='boundary',
                      to_type='line',
                      quiet=True)
    grass.run_command('v.type',
                      input=delaunay,
                      output=out_points,
                      from_type='centroid',
                      to_type='point',
                      quiet=True)

    grass.run_command('v.category',
                      input=out_lines_nocat,
                      output=out_lines,
                      op='add',
                      type='line',
                      quiet=True)
    grass.run_command('v.db.addtable',
                      map=out_lines,
                      col='cat integer,length double precision',
                      quiet=True)

    grass.message(_("Evaluating threshold..."))
    grass.run_command('v.to.db',
                      map=out_lines,
                      type='line',
                      op='length',
                      col='length',
                      quiet=True)

    db_info = grass.vector_db(map=out_lines, layer='1')[1]
    table = db_info['table']
    database = db_info['database']
    driver = db_info['driver']
    sql = "SELECT length FROM %s" % (table)
    tmpf = file(tmp, 'w')
    grass.run_command('db.select',
                      flags='c',
                      table=table,
                      database=database,
                      driver=driver,
                      sql=sql,
                      stdout=tmpf)
    tmpf.close()

    # check if result is empty
    tmpf = file(tmp)
    if tmpf.read(1) == '':
        grass.fatal(_("Table <%s> contains no data.") % table)
    tmpf.close()

    N = 0
    tmpf = file(tmp)
    for line in tmpf:
        N += 1
    tmpf.close()

    max_length = 0.0
    sortfile(tmp, tmp + ".sort")
    ppos = round(N * perc / 100)

    perc_orig = perc
    while ppos >= N and perc >= 90:
        perc -= 1
        ppos = round(N * perc / 100)

    if perc == 89:
        grass.fatal(_("Cannot calculate hull. Too few points."))

    if perc_orig > perc:
        thresh = int(perc) - 90
        grass.warning(_('Threshold reduced to %d to calculate hull' % thresh))

    inf = file(tmp + ".sort")
    l = 0
    for line in inf:
        if l == ppos:
            max_length = float(line.rstrip('\r\n'))
            break
        l += 1
    inf.close()

    grass.message(_("Feature selection..."))
    lines_concave = prefix + '_delaunay_lines_select'
    lines_concave_nocat = prefix + '_delaunay_lines_select_nocat'
    grass.run_command('v.extract',
                      input=out_lines,
                      output=lines_concave,
                      type='line',
                      where='length < %f' % max_length,
                      quiet=True)

    grass.run_command('v.category',
                      input=lines_concave,
                      output=lines_concave_nocat,
                      type='line',
                      op='del',
                      cat='-1',
                      quiet=True)

    borders_concave = prefix + '_delaunay_borders_select'
    grass.run_command('v.type',
                      input=lines_concave_nocat,
                      output=borders_concave,
                      from_type='line',
                      to_type='boundary',
                      quiet=True)

    areas_concave = prefix + '_delaunay_areas_select'
    grass.run_command('v.centroids',
                      input=borders_concave,
                      output=areas_concave,
                      quiet=True)
    grass.run_command('v.db.droptable',
                      map=areas_concave,
                      flags='f',
                      quiet=True)
    grass.run_command('v.db.addtable',
                      map=areas_concave,
                      col='cat integer,count integer',
                      quiet=True)

    grass.run_command('v.vect.stats',
                      points=out_points,
                      areas=areas_concave,
                      ccolumn='count',
                      quiet=True)

    areas_concave_extr = prefix + '_delaunay_areas_extract'

    grass.run_command('v.extract',
                      input=areas_concave,
                      output=areas_concave_extr,
                      type='area',
                      where='count = 1',
                      quiet=True)

    grass.message(_("The following warnings can be ignored"), flag='i')
    grass.run_command('v.dissolve',
                      input=areas_concave_extr,
                      output=output,
                      col='count',
                      layer='1',
                      quiet=True)
    grass.message(_("Concave hull successfully created"))
示例#30
0
def main():
    check_progs()
    
    inmap = options['input']
    output = options['ldm']
    width = options['width']
    color = options['color']
    graph = options['graph']
    ldm_type = options['type']

    mapset = grass.gisenv()['MAPSET']

    global tmp, nuldev, grass_version
    nuldev = None

    grass_version = grass.version()['version'][0]
    if grass_version != '7':
        grass.fatal(_("Sorry, this script works in GRASS 7.* only"))

    # setup temporary files
    tmp = grass.tempfile()
    
    # check for LatLong location
    if grass.locn_is_latlong() == True:
        grass.fatal("Module works only in locations with cartesian coordinate system")


    # check if input file exists
    if not grass.find_file(inmap, element = 'vector')['file']:
        grass.fatal(_("<%s> does not exist.") % inmap)
        
    # check for lines
    iflines = grass.vector_info_topo(inmap)['lines']
    if iflines == 0:
        grass.fatal(_("Map <%s> has no lines.") % inmap)
    

    # diplay options 
    if flags['x']:
        env = grass.gisenv()
        mon = env.get('MONITOR', None)
        if not mon:
            if not graph:
                grass.fatal(_("Please choose \"graph\" output file with LDM graphics or not use flag \"x\""))

    
    ####### DO IT #######
    # copy input vector map and drop table
    grass.run_command('g.copy', vect = (inmap, 'v_ldm_vect'), quiet = True, stderr = nuldev)
    db = grass.vector_db('v_ldm_vect')
    if db != {}:
        grass.run_command('v.db.droptable', map_ = 'v_ldm_vect', flags = 'f', quiet = True, stderr = nuldev)

    # compute mean center of lines with v.mc.py module
    center_coords = grass.read_command('v.mc.py', input_ = inmap, type_ = 'line',
                                quiet = True, stderr = nuldev).strip()
    mc_x = center_coords.split(' ')[0]
    mc_y = center_coords.split(' ')[1]

    center_coords = str(mc_x) + ',' + str(mc_y)

    ### 
    inmap = 'v_ldm_vect'

    # count lines
    count = grass.vector_info_topo(inmap)['lines']

    # add temp table with azimuths and lengths of lines
    in_cats = inmap + '_cats'    
    grass.run_command('v.category', input_ = inmap, option = 'add', 
                      output = in_cats, quiet = True, stderr = nuldev)
    grass.run_command('v.db.addtable', map_ = in_cats, table = 'tmp_tab', 
                      columns = 'sum_azim double, len double', quiet = True, stderr = nuldev)
    grass.run_command('v.db.connect', map_ = in_cats, table = 'tmp_tab', 
                      flags = 'o', quiet = True, stderr = nuldev)
    grass.run_command('v.to.db', map_ = in_cats, opt = 'azimuth', 
                      columns = 'sum_azim', units = 'radians', quiet = True, stderr = nuldev)
    grass.run_command('v.to.db', map_ = in_cats, opt = 'length',  
                      columns = 'len', units = 'meters', quiet = True, stderr = nuldev)    

    # find end azimuth
    p = grass.pipe_command('v.db.select', map_ = in_cats, columns = 'sum_azim', flags = 'c', quiet = True, stderr = nuldev)
    c = p.communicate()[0].strip().split('\n')

    sin = []
    cos = []
    
    for i in c:
        s1 = math.sin(float(i))
        c1 = math.cos(float(i))
        sin.append(s1)
        cos.append(c1)

    ca_sin = sum(map(float,sin))
    ca_cos = sum(map(float,cos))
    
    atan = math.atan2(ca_sin,ca_cos)
    end_azim = math.degrees(atan)

    # find compass angle    
    if end_azim < 0:
        a2 = -(end_azim)
    if end_azim > 0:
        a2 = end_azim
    if (ca_sin > 0) and (ca_cos > 0):
        comp_angle = a2
    if (ca_sin > 0) and (ca_cos < 0):
        comp_angle = a2
    if (ca_sin < 0) and (ca_cos > 0):
        comp_angle = 360 - a2
    if (ca_sin < 0) and (ca_cos < 0):
        comp_angle = 360 - a2

    # find LDM
    if end_azim < 0:
        a2 = -(end_azim)
    if end_azim > 0:
        a2 = end_azim
    if (ca_sin > 0) and (ca_cos > 0):
        ldm = 90 - a2
    if (ca_sin > 0) and (ca_cos < 0):
        ldm = 450 - a2
    if (ca_sin < 0) and (ca_cos > 0):
        ldm = 90 + a2
    if (ca_sin < 0) and (ca_cos < 0):
        ldm = 90 + a2

    # find circular variance
    sin_pow = math.pow(ca_sin,2) 
    cos_pow = math.pow(ca_cos,2) 

    circ_var = 1-(math.sqrt(sin_pow+cos_pow))/count

    # find start/end points of "mean" line
    end_azim_dms = decimal2dms(end_azim)

    # if end_azim < 0:
    #     end_azim_dms = '-' + (str(end_azim_dms))

    start_azim = 180 - end_azim
    start_azim_dms = decimal2dms(start_azim)
    
    p = grass.pipe_command('v.db.select', map_ = in_cats, columns = 'len',
                           flags = 'c', quiet = True, stderr = nuldev)
    c = p.communicate()[0].strip().split('\n')

    mean_length = sum(map(float,c))/len(c)
    half_length = float(mean_length)/2

    tmp1 = tmp + '.inf'
    inf1 = file(tmp1, 'w')
    print >> inf1, 'N ' + str(end_azim_dms) + ' E ' + str(half_length)
    inf1.close()
    
    end_coords = grass.read_command('m.cogo', input_ = tmp1, output = '-',
                                    coord = center_coords, quiet = True).strip()

    tmp2 = tmp + '.inf2'
    inf2 = file(tmp2, 'w')
    print >> inf2, 'N ' + str(start_azim_dms) + ' W ' + str(half_length)
    inf2.close()

    start_coords = grass.read_command('m.cogo', input_ = tmp2, output = '-',
                                      coord = center_coords, quiet = True).strip()

    # make "arrowhead" symbol
    if flags['x'] or graph:
        tmp3 = tmp + '.arrowhead_1'
        outf3 = file(tmp3, 'w')

        if ldm_type == 'direct':
            t1 = """VERSION 1.0
BOX -0.5 -0.5 0.5 0.5
POLYGON
  RING
  FCOLOR NONE
    LINE
      0 0
      0.3 -1
    END
  END
POLYGON
  RING
  FCOLOR NONE
    LINE
      0 0
      -0.3 -1
    END
  END
END
"""
            outf3.write(t1)
            outf3.close()
    
            gisdbase = grass.gisenv()['GISDBASE']
            location = grass.gisenv()['LOCATION_NAME']
            mapset = grass.gisenv()['MAPSET']
            symbols_dir = os.path.join(gisdbase, location, mapset, 'symbol', 'arrows')
            symbol = os.path.join(symbols_dir, 'arrowhead_1')
    
            if not os.path.exists(symbols_dir):
                try:
                    os.makedirs(symbols_dir)
                except OSError:
                    pass
        
            if not os.path.isfile(symbol):
                shutil.copyfile(tmp3, symbol)
        
    
        # write LDM graph file and optionally display line of LDM with an arrow
    tmp4 = tmp + '.ldm'
    outf4 = file(tmp4, 'w')
    
    arrow_size = int(width) * 1.4
    arrow_azim = 360 - float(end_azim)

    if ldm_type == 'direct':
        t2 = string.Template("""
move $start_coords
width $width
color $color
draw $end_coords

rotation $arrow_azim
width $width
symbol $symbol_s $arrow_size $end_coords $color
""")    
        s2 = t2.substitute(start_coords = start_coords, width = width, color = color,
                       end_coords = end_coords, arrow_azim = arrow_azim,
                       symbol_s = "arrows/arrowhead_1", arrow_size = arrow_size)
    else:
        t2 = string.Template("""
move $start_coords
width $width
color $color
draw $end_coords
""")    
        s2 = t2.substitute(start_coords = start_coords, width = width, color = color,
                       end_coords = end_coords)

    outf4.write(s2)
    outf4.close()

    if graph:
        shutil.copy(tmp4, graph)



    # save LDM line to vector if option "output" set  
    if output:
        tmp5 = tmp + '.line'
        outf5 = file(tmp5, 'w')

        print >> outf5, str(start_coords)
        print >> outf5, str(end_coords)

        outf5.close()

        grass.run_command('v.in.lines', input_ = tmp5, output = output,
                              separator = " ", overwrite = True, quiet = True)

        out_cats = output + '_cats'
        grass.run_command('v.category', input_ = output, option = 'add', 
                          output = out_cats, quiet = True, stderr = nuldev)
        grass.run_command('g.rename', vect = (out_cats,output), 
                          overwrite = True, quiet = True, stderr = nuldev)
        
        if circ_var:
            col = 'comp_angle double,dir_mean double,cir_var double,ave_x double,ave_y double,ave_len double'
        else:
            col = 'comp_angle double,dir_mean double,ave_x double,ave_y double,ave_len double'
                        
        grass.run_command('v.db.addtable', map_ = output, columns = col, quiet = True, stderr = nuldev)

        tmp6 = tmp + '.sql'
        outf6 = file(tmp6, 'w')
                
        t3 = string.Template("""
UPDATE $output SET comp_angle = $comp_angle;
UPDATE $output SET dir_mean = $ldm;
UPDATE $output SET ave_x = $mc_x;
UPDATE $output SET ave_y = $mc_y;
UPDATE $output SET ave_len = $mean_length;
""")
        s3 = t3.substitute(output = output, comp_angle = ("%0.3f" % comp_angle),
                           ldm = ("%0.3f" % ldm), mc_x = ("%0.3f" % float(mc_x)),
                           mc_y = ("%0.3f" % float(mc_y)), mean_length = ("%0.3f" % mean_length))
        outf6.write(s3)

        if circ_var:
            print >> outf6, "UPDATE %s SET cir_var = %0.3f;" % (output, circ_var)

        outf6.close()

        grass.run_command('db.execute', input_ = tmp6, quiet = True, stderr = nuldev)


    # print LDM parameters to stdout (with <-g> flag in shell style):
    print_out = ['Compass Angle', 'Directional Mean', 'Average Center', 'Average Length']
    if circ_var:
        print_out.append('Circular Variance')
        
    print_shell = ['compass_angle', 'directional_mean', 'average_center',
                   'average_length', 'circular_variance']
    if circ_var:
        print_shell.append('circular_variance')
        
    print_vars = ["%0.3f" % comp_angle, "%0.3f" % ldm,
                  "%0.3f" % float(mc_x) + ',' + "%0.3f" % float(mc_y),
                  "%0.3f" % mean_length]
    if circ_var:
        print_vars.append("%0.3f" % circ_var)


    if flags['g']:
        for i,j in zip(print_shell, print_vars):
            print "%s=%s" % (i, j)
    else:
        for i,j in zip(print_out, print_vars):
            print "%s: %s" % (i, j)


    # diplay LDM graphics
    if flags['x']:
        if mon:
            if graph:
                grass.run_command('d.graph', input_ = graph, flags = 'm', quiet = True, stderr = nuldev)
            else:
                grass.run_command('d.graph', input_ = tmp4, flags = 'm', quiet = True, stderr = nuldev)
        elif graph:
            grass.message(_("\n Use this command in wxGUI \"Command console\" or with <d.mon> or with \"command layer\" to display LDM graphics: \n d.graph -m input=%s \n\n" ) % graph)
示例#31
0
def main():
    vector = options["map"]
    layer = options["layer"]
    column = options["column"]
    value = options["value"]
    qcolumn = options["query_column"]
    where = options["where"]
    sqlitefile = options["sqliteextra"]

    mapset = grass.gisenv()["MAPSET"]

    # does map exist in CURRENT mapset?
    if not grass.find_file(vector, element="vector", mapset=mapset)["file"]:
        grass.fatal(_("Vector map <%s> not found in current mapset") % vector)

    try:
        f = grass.vector_db(vector)[int(layer)]
    except KeyError:
        grass.fatal(
            _(
                "There is no table connected to this map. Run v.db.connect or v.db.addtable first."
            )
        )

    table = f["table"]
    database = f["database"]
    driver = f["driver"]

    # check for SQLite backend for extra functions
    if sqlitefile and driver != "sqlite":
        grass.fatal(_("Use of libsqlitefunctions only with SQLite backend"))
    if driver == "sqlite" and sqlitefile:
        if not os.access(sqlitefile, os.R_OK):
            grass.fatal(_("File <%s> not found") % sqlitefile)

    # Check column existence and get its type.
    all_columns = grass.vector_columns(vector, layer)
    coltype = None
    for column_name, column_record in all_columns.items():
        if column.lower() == column_name.lower():
            coltype = column_record["type"]
            break
    if not coltype:
        grass.fatal(_("Column <%s> not found") % column)

    if qcolumn:
        if value:
            grass.fatal(_("<value> and <qcolumn> are mutually exclusive"))
        # special case: we copy from another column
        value = qcolumn
    else:
        if not value:
            grass.fatal(_("Either <value> or <qcolumn> must be given"))
        # we insert a value
        if coltype.upper() not in ["INTEGER", "DOUBLE PRECISION"]:
            value = "'%s'" % value

    cmd = "UPDATE %s SET %s=%s" % (table, column, value)
    if where:
        cmd += " WHERE " + where

    # SQLite: preload extra functions from extension lib if provided by user
    if sqlitefile:
        sqliteload = "SELECT load_extension('%s');\n" % sqlitefile
        cmd = sqliteload + cmd

    grass.verbose('SQL: "%s"' % cmd)
    grass.write_command(
        "db.execute", input="-", database=database, driver=driver, stdin=cmd
    )

    # write cmd history:
    grass.vector_history(vector)

    return 0
示例#32
0
def setupPro(resourcedir='mySWIM',parfile='mySWIM/myswim.py'):
    '''Set up all files needed for SWIM and mySWIM and write mySWIM resource dirs
    and parameterfile. In this function, grass input arguments are all in options
    without any removal of empty ones ''!
    '''
    # collect parameters in dict
    p = {}
    
    # check if needed params are set
    for e in ['proname','prodir','stationsvect','simf','subbasins','hydrotopes',
              'csvdata']:
        if len(options['proname'])<2: grass.fatal('%s must be set.' %e)
        p[e] = options[e]
    
    # change into prodir as all subsequent paths maybe relative to that
    os.chdir(p['prodir'])

    # check if in same mapset
    stationsinfo = grass.vector_info(options['stationsvect'])
    if stationsinfo['mapset']!=grass.gisenv()['MAPSET']:
        grass.fatal('Must be in same mapset as %s.' %(options['stationsvect']))
  
    # find files in prodir
    ffiles = {'bsn':'.bsn','cod':'.cod','strf':'.str'}
    ffiles = dict([(e,options['proname']+ffiles[e]) for e in ffiles])
    foundf = findFiles(ffiles,findin=p['prodir'])
    # add to pro
    p.update(foundf)
    
    ### check for other swim files
    swimfiles = {'swim':'swim',
                 'soilcio':'soil.cio',
                 'filecio':'file.cio',
                 'figf':p['proname']+'.fig',
                 'runoff':'runoff.dat',
                 'simf':'rvaddQ_subbasin.prn',
                 'conf':'swim.conf',
                 'clim1':'clim1.dat',
                 'clim2':'clim2.dat',
                 'cntab':'cntab.dat',
                 'cropd':'crop.dat',
                 'subcatchf':'subcatch.def',
                 'subcatchbsn':'subcatch.bsn',
                 'wgen':'wgen.dat',
                 'wstor':'wstor.dat'}
    # check for files with warning if not exists
    swimfiles = findFiles(swimfiles,findin=p['prodir'],fatal=False)
    p.update(swimfiles) # only those that were found and not double    
    
    # simf
    p['simf']=options['simf']
    if len(options['simf'])>0 or os.path.exists(options['simf']):
        grass.warning('Simulated discharge file %s not found or not given.' %options['simf'])
    
    # check if mySWIM already exists and if not create dir
    resourcedirs = {'resourcedir':os.path.join(p['prodir'],resourcedir)}
    for n,d in [('Qdir','Q_files'),('clusterdir','lljobs'),('pestdir','pest')]:
        resourcedirs[n] = os.path.join(resourcedir,d)
    for n,d in resourcedirs.items():
        if os.path.exists(d):
            gm('Found the resource dir in: %s' %d)
        else:
            gm('Creating new resource dir in: %s' %d)
            os.makedirs(d)
    # attach to p
    p.update(resourcedirs)
    
    # climate dir
    p['climatedir'] = options['climatedir']
    
    # pest parameters
    p['pestdir'] = os.path.join(resourcedir,'pest')
    p['pestexe'] = os.path.join(resourcedir,'pest/pest')
    p['pestinstructions'] = options['pestinstructions']
    
    # get parameters from bsn file
    try: par,tmplt = swim.readBsnf(p['bsn'])
    except:
        grass.fatal('''Cant read the bsn file properly. Is it formatted like this:\n
        switch parameters
        value  param
        value  param
        ...
        _______________________________________________________________________
        basin, initialisation & calibration parameters
        param  param ... param description blabla
        value  value ... value
        ...
        _______________________________________________________________________
        CO2 EFFECT ON NET PHOTOSYNTHESIS (alpha) & TRANSPIRATION (beta)
        (ialpha,ibeta) = (1,0) OR (1,1) ONLY FOR SCENARIO PERIODS!
        ialpha    ibeta     C3C4crop  CO2-ref   CO2-scen
        0/1       0/1       3/4       346       406-436   OPTIONS & RANGES
        0         0         0         0         0
        _______________________________________________________________________''')
        
    gm('Found these parameters in the bsn file: %s' %','.join(par.keys()))
    gm('Those parameters will be saved in the future!')
    # make parameter save formats
    pfmts = [(e,{True:'14.3f',False:'7.4f'}[par[e]>50]) for e in sorted(par.keys())]
    
    # check if resource files exists and if not create them
    rfiles = {'runsf': [('runID','04i'),('paramID','04i'),('climID','04i'),
                        ('runTIME','26s'),('station','5s'),('NSE','+7.3f'),
                        ('bias','+6.1f'),('start','10s'),('end','10s'),
                        ('purpose','-12s'),('notes','-64s')],
              'paramf': [('paramID','04i')]+pfmts,
              'climsf': [('climID','04i'), ('title','50s'),
                         ('start','10s'), ('end','10s'), ('notes','-64s')]}
    for f in rfiles:
        name = os.path.join(resourcedir,'%s.%s' %(p['proname'],f[:-1]))
        # exists?
        if os.path.exists(name):
            gm('%s file exists in: %s' %(f,name))
            p[f] = name
        else: # create
            writeTxtDB(name,rfiles[f])
            p[f] = name
        # add sqlite tables to same database as self.stations
        tblname = f[:-1]
        sqlTbl(tblname,rfiles[f])
    
    # save sqlitedb
    p['sqlitedb'] = grass.vector_db(options['stationsvect'])[1]['database']
    
    # get stations info and data
    p['stations'] = getStations(resourcedir=resourcedir)
    p['obsf']     = os.path.join(resourcedir,'observations.csv')
    
    # llcmds
    if options['llcmds']!='':
        p['llcmds'] = dict([cmd.split(':') for cmd in options['llcmds'].split(',')])
    
    # write all in p to a parameter file
    parfpath = os.path.join(resourcedir,'myswim.py')
    if os.path.exists(parfpath):
        pars = imp.load_source('myswim',parfpath)
        opars= {e:pars.__getattribute__(e) for e in dir(pars) if e not in p and not e.startswith('__')}
        gm('Previous myswim parameter file found! Preserved these parameter: %r' %opars)
        p.update(opars)
    # now write 
    parf = file(parfpath,'w')
    parf.write('### mySWIM parameter file, saved on: %s\n' %dt.datetime.now())
    for e in sorted(p.keys()): parf.write('%s=%r\n' %(e,p[e]))
    
    gm('All set up! To proceed, uncheck the -0 flag and change into %s' %p['prodir'])
    return
示例#33
0
def main():
    vector = options['map']
    layer = options['layer']
    column = options['column']
    value = options['value']
    qcolumn = options['query_column']
    where = options['where']
    sqlitefile = options['sqliteextra']

    mapset = grass.gisenv()['MAPSET']

    # does map exist in CURRENT mapset?
    if not grass.find_file(vector, element='vector', mapset=mapset)['file']:
        grass.fatal(_("Vector map <%s> not found in current mapset") % vector)

    try:
        f = grass.vector_db(vector)[int(layer)]
    except KeyError:
        grass.fatal(
            _('There is no table connected to this map. Run v.db.connect or v.db.addtable first.'
              ))

    table = f['table']
    database = f['database']
    driver = f['driver']

    # check for SQLite backend for extra functions
    if sqlitefile and driver != "sqlite":
        grass.fatal(_("Use of libsqlitefunctions only with SQLite backend"))
    if driver == "sqlite" and sqlitefile:
        if not os.access(sqlitefile, os.R_OK):
            grass.fatal(_("File <%s> not found") % sqlitefile)

    # checking column types
    try:
        coltype = grass.vector_columns(vector, layer)[column]['type']
    except KeyError:
        grass.fatal(_('Column <%s> not found') % column)

    if qcolumn:
        if value:
            grass.fatal(_('<value> and <qcolumn> are mutually exclusive'))
        # special case: we copy from another column
        value = qcolumn
    else:
        if not value:
            grass.fatal(_('Either <value> or <qcolumn> must be given'))
        # we insert a value
        if coltype.upper() not in ["INTEGER", "DOUBLE PRECISION"]:
            value = "'%s'" % value

    cmd = "UPDATE %s SET %s=%s" % (table, column, value)
    if where:
        cmd += " WHERE " + where

    # SQLite: preload extra functions from extension lib if provided by user
    if sqlitefile:
        sqliteload = "SELECT load_extension('%s');\n" % sqlitefile
        cmd = sqliteload + cmd

    grass.verbose("SQL: \"%s\"" % cmd)
    grass.write_command('db.execute',
                        input='-',
                        database=database,
                        driver=driver,
                        stdin=cmd)

    # write cmd history:
    grass.vector_history(vector)

    return 0
示例#34
0
def main():
    global tmp, sqltmp, tmpname, nuldev, vector, mask_found, rastertmp
    mask_found = False
    rastertmp = False
    #### setup temporary files
    tmp = grass.tempfile()
    sqltmp = tmp + ".sql"
    # we need a random name
    tmpname = grass.basename(tmp)

    nuldev = file(os.devnull, 'w')

    raster = options['raster']
    colprefix = options['column_prefix']
    vector = options['vector']
    layer = options['layer']
    percentile = options['percentile']

    ### setup enviro vars ###
    env = grass.gisenv()
    mapset = env['MAPSET']

    vs = vector.split('@')
    if len(vs) > 1:
	vect_mapset = vs[1]
    else:
	vect_mapset = mapset

    # does map exist in CURRENT mapset?
    if vect_mapset != mapset or not grass.find_file(vector, 'vector', mapset)['file']:
	grass.fatal(_("Vector map <%s> not found in current mapset") % vector)

    vector = vs[0]

    rastertmp = "%s_%s" % (vector, tmpname)

    # check the input raster map
    if not grass.find_file(raster, 'cell')['file']:
	grass.fatal(_("Raster map <%s> not found") % raster)

    # check presence of raster MASK, put it aside
    mask_found = bool(grass.find_file('MASK', 'cell')['file'])
    if mask_found:
	grass.message(_("Raster MASK found, temporarily disabled"))
	grass.run_command('g.rename', rast = ('MASK', tmpname + "_origmask"), quiet = True)

    # save current settings:
    grass.use_temp_region()

    # Temporarily aligning region resolution to $RASTER resolution
    # keep boundary settings
    grass.run_command('g.region', align = raster)

    # prepare raster MASK
    if grass.run_command('v.to.rast', input = vector, output = rastertmp,
			 use = 'cat', quiet = True) != 0:
	grass.fatal(_("An error occurred while converting vector to raster"))

    # dump cats to file to avoid "too many argument" problem:
    p = grass.pipe_command('r.category', map = rastertmp, fs = ';', quiet = True)
    cats = []
    for line in p.stdout:
	cats.append(line.rstrip('\r\n').split(';')[0])
    p.wait()

    number = len(cats)
    if number < 1:
	grass.fatal(_("No categories found in raster map"))

    # check if DBF driver used, in this case cut to 10 chars col names:
    try:
        fi = grass.vector_db(map = vector)[int(layer)]
    except KeyError:
	grass.fatal(_('There is no table connected to this map. Run v.db.connect or v.db.addtable first.'))
    # we need this for non-DBF driver:
    dbfdriver = fi['driver'] == 'dbf'

    # Find out which table is linked to the vector map on the given layer
    if not fi['table']:
	grass.fatal(_('There is no table connected to this map. Run v.db.connect or v.db.addtable first.'))

    basecols = ['n', 'min', 'max', 'range', 'mean', 'stddev', 'variance', 'cf_var', 'sum']

    # we need at least three chars to distinguish [mea]n from [med]ian
    # so colprefix can't be longer than 6 chars with DBF driver
    if dbfdriver:
	colprefix = colprefix[:6]

    # do extended stats?
    if flags['e']:
	# namespace is limited in DBF but the % value is important
	if dbfdriver:
	    perccol = "per" + percentile
	else:
	    perccol = "percentile_" + percentile
	extracols = ['first_quartile', 'median', 'third_quartile'] + [perccol]
    else:
	extracols = []

    addcols = []
    for i in basecols + extracols:
	# check if column already present
	currcolumn = ("%s_%s" % (colprefix, i))
	if dbfdriver:
	    currcolumn = currcolumn[:10]

	if currcolumn in grass.vector_columns(vector, layer).keys():
	    if not flags['c']:
		grass.fatal((_("Cannot create column <%s> (already present). ") % currcolumn) +
			    _("Use -c flag to update values in this column."))
	else:
	    if i == "n":
		coltype = "INTEGER"
	    else:
		coltype = "DOUBLE PRECISION"
	    addcols.append(currcolumn + ' ' + coltype)

    if addcols:
	grass.verbose(_("Adding columns '%s'") % addcols)
	if grass.run_command('v.db.addcolumn', map = vector, columns = addcols) != 0:
	    grass.fatal(_("Adding columns failed. Exiting."))

    # calculate statistics:
    grass.message(_("Processing data (%d categories)...") % number)

    # get rid of any earlier attempts
    grass.try_remove(sqltmp)

    colnames = []
    for var in basecols + extracols:
	colname = '%s_%s' % (colprefix, var)
	if dbfdriver:
	    colname = colname[:10]
	colnames.append(colname)

    ntabcols = len(colnames)

    # do extended stats?
    if flags['e']:
	extstat = 'e'
    else:
	extstat = ""
	
    f = file(sqltmp, 'w')

    # do the stats
    p = grass.pipe_command('r.univar', flags = 't' + 'g' + extstat, map = raster, 
                      zones = rastertmp, percentile = percentile, fs = ';')

    first_line = 1
    for line in p.stdout:
	if first_line:
	    first_line = 0
	    continue

	vars = line.rstrip('\r\n').split(';')

	f.write("UPDATE %s SET" % fi['table'])
	i = 2
	first_var = 1
	for colname in colnames:
	    value = vars[i]
	    # convert nan, +nan, -nan to NULL
	    if value.lower().endswith('nan'):
		value = 'NULL'
	    if not first_var:
		f.write(" , ")
	    else:
		first_var = 0
	    f.write(" %s=%s" % (colname, value))
	    i += 1
	    # skip n_null_cells, mean_of_abs, sum_of_abs
	    if i == 3 or i == 8 or i == 13:
		i += 1

	f.write(" WHERE %s=%s;\n" % (fi['key'], vars[0]))

    p.wait()
    f.close()

    grass.message(_("Updating the database ..."))
    exitcode = grass.run_command('db.execute', input = sqltmp,
				 database = fi['database'], driver = fi['driver'])

    grass.run_command('g.remove', rast = 'MASK', quiet = True, stderr = nuldev)

    if exitcode == 0:
	grass.message((_("Statistics calculated from raster map <%s>") % raster) +
		      (_(" and uploaded to attribute table of vector map <%s>.") % vector))
    else:
	grass.warning(_("Failed to upload statistics to attribute table of vector map <%s>.") % vector)
    
    
    sys.exit(exitcode)
示例#35
0
def main():
    # old connection
    old_database = options["old_database"]
    old_schema = options["old_schema"]
    # new connection
    default_connection = gscript.db_connection()
    if options["new_driver"]:
        new_driver = options["new_driver"]
    else:
        new_driver = default_connection["driver"]
    if options["new_database"]:
        new_database = options["new_database"]
    else:
        new_database = default_connection["database"]
    if options["new_schema"]:
        new_schema = options["new_schema"]
    else:
        new_schema = default_connection["schema"]

    if old_database == "":
        old_database = None
    old_database_subst = None
    if old_database is not None:
        old_database_subst = substitute_db(old_database)

    new_database_subst = substitute_db(new_database)

    if old_database_subst == new_database_subst and old_schema == new_schema:
        gscript.fatal(
            _("Old and new database connection is identical. "
              "Nothing to do."))

    mapset = gscript.gisenv()["MAPSET"]

    vectors = gscript.list_grouped("vect")[mapset]
    num_vectors = len(vectors)

    if flags["c"]:
        # create new database if not existing
        create_db(new_driver, new_database)

    i = 0
    for vect in vectors:
        vect = "%s@%s" % (vect, mapset)
        i += 1
        gscript.message(
            _("%s\nReconnecting vector map <%s> "
              "(%d of %d)...\n%s") %
            ("-" * 80, vect, i, num_vectors, "-" * 80))
        for f in gscript.vector_db(vect, stderr=nuldev).values():
            layer = f["layer"]
            schema_table = f["table"]
            key = f["key"]
            database = f["database"]
            driver = f["driver"]

            # split schema.table
            if "." in schema_table:
                schema, table = schema_table.split(".", 1)
            else:
                schema = ""
                table = schema_table

            if new_schema:
                new_schema_table = "%s.%s" % (new_schema, table)
            else:
                new_schema_table = table

            gscript.debug(
                "DATABASE = '%s' SCHEMA = '%s' TABLE = '%s' ->\n"
                "      NEW_DATABASE = '%s' NEW_SCHEMA_TABLE = '%s'" %
                (old_database, schema, table, new_database, new_schema_table))

            do_reconnect = True
            if old_database_subst is not None:
                if database != old_database_subst:
                    do_reconnect = False
            if database == new_database_subst:
                do_reconnect = False
            if schema != old_schema:
                do_reconnect = False

            if do_reconnect:
                gscript.verbose(_("Reconnecting layer %d...") % layer)

                if flags["c"]:
                    # check if table exists in new database
                    copy_tab(
                        driver,
                        database,
                        schema_table,
                        new_driver,
                        new_database,
                        new_schema_table,
                    )

                # drop original table if required
                if flags["d"]:
                    drop_tab(vect, layer, schema_table, driver,
                             substitute_db(database))

                # reconnect tables (don't use substituted new_database)
                # NOTE: v.db.connect creates an index on the key column
                try:
                    gscript.run_command(
                        "v.db.connect",
                        flags="o",
                        quiet=True,
                        map=vect,
                        layer=layer,
                        driver=new_driver,
                        database=new_database,
                        table=new_schema_table,
                        key=key,
                    )
                except CalledModuleError:
                    gscript.warning(
                        _("Unable to connect table <%s> to vector "
                          "<%s> on layer <%s>") % (table, vect, str(layer)))

            else:
                if database != new_database_subst:
                    gscript.warning(
                        _("Layer <%d> will not be reconnected "
                          "because database or schema do not "
                          "match.") % layer)
    return 0
示例#36
0
def main():
    global tmp, sqltmp, tmpname, nuldev, vector, rastertmp
    rastertmp = False
    # setup temporary files
    tmp = grass.tempfile()
    sqltmp = tmp + ".sql"
    # we need a random name
    tmpname = grass.basename(tmp)

    nuldev = open(os.devnull, 'w')

    rasters = options['raster'].split(',')
    colprefixes = options['column_prefix'].split(',')
    vector = options['map']
    layer = options['layer']
    percentile = options['percentile']
    basecols = options['method'].split(',')

    ### setup enviro vars ###
    env = grass.gisenv()
    mapset = env['MAPSET']

    vs = vector.split('@')
    if len(vs) > 1:
        vect_mapset = vs[1]
    else:
        vect_mapset = mapset

    # does map exist in CURRENT mapset?
    if vect_mapset != mapset or not grass.find_file(vector, 'vector',
                                                    mapset)['file']:
        grass.fatal(_("Vector map <%s> not found in current mapset") % vector)

    # check if DBF driver used, in this case cut to 10 chars col names:
    try:
        fi = grass.vector_db(map=vector)[int(layer)]
    except KeyError:
        grass.fatal(
            _('There is no table connected to this map. Run v.db.connect or v.db.addtable first.'
              ))
    # we need this for non-DBF driver:
    dbfdriver = fi['driver'] == 'dbf'

    # colprefix for every raster map?
    if len(colprefixes) != len(rasters):
        grass.fatal(
            _("Number of raster maps ({0}) different from \
                      number of column prefixes ({1})".format(
                len(rasters), len(colprefixes))))

    vector = vs[0]

    rastertmp = "%s_%s" % (vector, tmpname)

    for raster in rasters:
        # check the input raster map
        if not grass.find_file(raster, 'cell')['file']:
            grass.fatal(_("Raster map <%s> not found") % raster)

    # save current settings:
    grass.use_temp_region()

    # Temporarily aligning region resolution to $RASTER resolution
    # keep boundary settings
    grass.run_command('g.region', align=rasters[0])

    # prepare base raster for zonal statistics
    try:
        nlines = grass.vector_info_topo(vector)['lines']
        # Create densified lines rather than thin lines
        if flags['d'] and nlines > 0:
            grass.run_command('v.to.rast',
                              input=vector,
                              layer=layer,
                              output=rastertmp,
                              use='cat',
                              flags='d',
                              quiet=True)
        else:
            grass.run_command('v.to.rast',
                              input=vector,
                              layer=layer,
                              output=rastertmp,
                              use='cat',
                              quiet=True)
    except CalledModuleError:
        grass.fatal(_("An error occurred while converting vector to raster"))

    # dump cats to file to avoid "too many argument" problem:
    p = grass.pipe_command('r.category', map=rastertmp, sep=';', quiet=True)
    cats = []

    for line in p.stdout:
        line = decode(line)
        cats.append(line.rstrip('\r\n').split(';')[0])
    p.wait()

    number = len(cats)
    if number < 1:
        grass.fatal(_("No categories found in raster map"))

    # Check if all categories got converted
    # Report categories from vector map
    vect_cats = grass.read_command('v.category',
                                   input=vector,
                                   option='report',
                                   flags='g').rstrip('\n').split('\n')

    # get number of all categories in selected layer
    for vcl in vect_cats:
        if vcl.split(' ')[0] == layer and vcl.split(' ')[1] == 'all':
            vect_cats_n = int(vcl.split(' ')[2])

    if vect_cats_n != number:
        grass.warning(
            _("Not all vector categories converted to raster. \
                         Converted {0} of {1}.".format(number, vect_cats_n)))

    # check if DBF driver used, in this case cut to 10 chars col names:
    try:
        fi = grass.vector_db(map=vector)[int(layer)]
    except KeyError:
        grass.fatal(
            _('There is no table connected to this map. Run v.db.connect or v.db.addtable first.'
              ))
    # we need this for non-DBF driver:
    dbfdriver = fi['driver'] == 'dbf'

    # Find out which table is linked to the vector map on the given layer
    if not fi['table']:
        grass.fatal(
            _('There is no table connected to this map. Run v.db.connect or v.db.addtable first.'
              ))

    # replaced by user choiche
    #basecols = ['n', 'min', 'max', 'range', 'mean', 'stddev', 'variance', 'cf_var', 'sum']

    for i in xrange(len(rasters)):
        raster = rasters[i]
        colprefix = colprefixes[i]
        # we need at least three chars to distinguish [mea]n from [med]ian
        # so colprefix can't be longer than 6 chars with DBF driver
        if dbfdriver:
            colprefix = colprefix[:6]
            variables_dbf = {}

        # by default perccol variable is used only for "variables" variable
        perccol = "percentile"
        perc = None
        for b in basecols:
            if b.startswith('p'):
                perc = b
        if perc:
            # namespace is limited in DBF but the % value is important
            if dbfdriver:
                perccol = "per" + percentile
            else:
                perccol = "percentile_" + percentile
            percindex = basecols.index(perc)
            basecols[percindex] = perccol

        # dictionary with name of methods and position in "r.univar -gt"  output
        variables = {
            'number': 2,
            'null_cells': 2,
            'minimum': 4,
            'maximum': 5,
            'range': 6,
            'average': 7,
            'stddev': 9,
            'variance': 10,
            'coeff_var': 11,
            'sum': 12,
            'first_quartile': 14,
            'median': 15,
            'third_quartile': 16,
            perccol: 17
        }
        # this list is used to set the 'e' flag for r.univar
        extracols = ['first_quartile', 'median', 'third_quartile', perccol]
        addcols = []
        colnames = []
        extstat = ""
        for i in basecols:
            # this check the complete name of out input that should be truncated
            for k in variables.keys():
                if i in k:
                    i = k
                    break
            if i in extracols:
                extstat = 'e'
            # check if column already present
            currcolumn = ("%s_%s" % (colprefix, i))
            if dbfdriver:
                currcolumn = currcolumn[:10]
                variables_dbf[currcolumn.replace("%s_" % colprefix, '')] = i

            colnames.append(currcolumn)
            if currcolumn in grass.vector_columns(vector, layer).keys():
                if not flags['c']:
                    grass.fatal(
                        (_("Cannot create column <%s> (already present). ") %
                         currcolumn) +
                        _("Use -c flag to update values in this column."))
            else:
                if i == "n":
                    coltype = "INTEGER"
                else:
                    coltype = "DOUBLE PRECISION"
                addcols.append(currcolumn + ' ' + coltype)

        if addcols:
            grass.verbose(_("Adding columns '%s'") % addcols)
            try:
                grass.run_command('v.db.addcolumn',
                                  map=vector,
                                  columns=addcols,
                                  layer=layer)
            except CalledModuleError:
                grass.fatal(_("Adding columns failed. Exiting."))

        # calculate statistics:
        grass.message(_("Processing input data (%d categories)...") % number)

        # get rid of any earlier attempts
        grass.try_remove(sqltmp)

        f = open(sqltmp, 'w')

        # do the stats
        p = grass.pipe_command('r.univar',
                               flags='t' + extstat,
                               map=raster,
                               zones=rastertmp,
                               percentile=percentile,
                               sep=';')

        first_line = 1

        f.write("{0}\n".format(grass.db_begin_transaction(fi['driver'])))
        for line in p.stdout:
            if first_line:
                first_line = 0
                continue

            vars = decode(line).rstrip('\r\n').split(';')

            f.write("UPDATE %s SET" % fi['table'])
            first_var = 1
            for colname in colnames:
                variable = colname.replace("%s_" % colprefix, '', 1)
                if dbfdriver:
                    variable = variables_dbf[variable]
                i = variables[variable]
                value = vars[i]
                # convert nan, +nan, -nan, inf, +inf, -inf, Infinity, +Infinity,
                # -Infinity to NULL
                if value.lower().endswith('nan') or 'inf' in value.lower():
                    value = 'NULL'
                if not first_var:
                    f.write(" , ")
                else:
                    first_var = 0
                f.write(" %s=%s" % (colname, value))

            f.write(" WHERE %s=%s;\n" % (fi['key'], vars[0]))
        f.write("{0}\n".format(grass.db_commit_transaction(fi['driver'])))
        p.wait()
        f.close()

        grass.message(_("Updating the database ..."))
        exitcode = 0
        try:
            grass.run_command('db.execute',
                              input=sqltmp,
                              database=fi['database'],
                              driver=fi['driver'])
            grass.verbose(
                (_("Statistics calculated from raster map <{raster}>"
                   " and uploaded to attribute table"
                   " of vector map <{vector}>.").format(raster=raster,
                                                        vector=vector)))
        except CalledModuleError:
            grass.warning(
                _("Failed to upload statistics to attribute table of vector map <%s>."
                  ) % vector)
            exitcode = 1

            sys.exit(exitcode)
示例#37
0
def main():

    global allmap
    global trainmap
    global feature_vars
    global training_vars
    global model_output_csv
    global model_output_csvt
    global temptable
    global r_commands
    global reclass_files

    allmap = trainmap = feature_vars = training_vars = None
    model_output_csv = model_output_csvt = temptable = r_commands = None
    reclass_files = None

    voting_function = "voting <- function (x, w) {\n"
    voting_function += "res <- tapply(w, x, sum, simplify = TRUE)\n"
    voting_function += "maj_class <- as.numeric(names(res)[which.max(res)])\n"
    voting_function += "prob <- as.numeric(res[which.max(res)])\n"
    voting_function += "return(list(maj_class=maj_class, prob=prob))\n}"

    weighting_functions = {}
    weighting_functions[
        "smv"] = "weights <- rep(1/length(weighting_base), length(weighting_base))"
    weighting_functions[
        "swv"] = "weights <- weighting_base/sum(weighting_base)"
    weighting_functions[
        "bwwv"] = "weights <- 1-(max(weighting_base) - weighting_base)/(max(weighting_base) - min(weighting_base))"
    weighting_functions[
        "qbwwv"] = "weights <- ((min(weighting_base) - weighting_base)/(max(weighting_base) - min(weighting_base)))**2"

    packages = {
        "svmRadial": ["kernlab"],
        "svmLinear": ["kernlab"],
        "svmPoly": ["kernlab"],
        "rf": ["randomForest"],
        "ranger": ["ranger", "dplyr"],
        "rpart": ["rpart"],
        "C5.0": ["C50"],
        "xgbTree": ["xgboost", "plyr"],
    }

    install_package = "if(!is.element('%s', installed.packages()[,1])){\n"
    install_package += "cat('\\n\\nInstalling %s package from CRAN')\n"
    install_package += "if(!file.exists(Sys.getenv('R_LIBS_USER'))){\n"
    install_package += "dir.create(Sys.getenv('R_LIBS_USER'), recursive=TRUE)\n"
    install_package += ".libPaths(Sys.getenv('R_LIBS_USER'))}\n"
    install_package += "chooseCRANmirror(ind=1)\n"
    install_package += "install.packages('%s', dependencies=TRUE)}"

    if options["segments_map"]:
        allfeatures = options["segments_map"]
        segments_layer = options["segments_layer"]
        allmap = True
    else:
        allfeatures = options["segments_file"]
        allmap = False

    if options["training_map"]:
        training = options["training_map"]
        training_layer = options["training_layer"]
        trainmap = True
    else:
        training = options["training_file"]
        trainmap = False

    classcol = None
    if options["train_class_column"]:
        classcol = options["train_class_column"]
    output_classcol = options["output_class_column"]
    output_probcol = None
    if options["output_prob_column"]:
        output_probcol = options["output_prob_column"]
    classifiers = options["classifiers"].split(",")
    weighting_modes = options["weighting_modes"].split(",")
    weighting_metric = options["weighting_metric"]
    if len(classifiers) == 1:
        gscript.message("Only one classifier, so no voting applied")

    processes = int(options["processes"])
    folds = options["folds"]
    partitions = options["partitions"]
    tunelength = options["tunelength"]
    separator = gscript.separator(options["separator"])
    tunegrids = literal_eval(
        options["tunegrids"]) if options["tunegrids"] else {}

    max_features = None
    if options["max_features"]:
        max_features = int(options["max_features"])

    training_sample_size = None
    if options["training_sample_size"]:
        training_sample_size = options["training_sample_size"]

    tuning_sample_size = None
    if options["tuning_sample_size"]:
        tuning_sample_size = options["tuning_sample_size"]

    output_model_file = None
    if options["output_model_file"]:
        output_model_file = options["output_model_file"].replace("\\", "/")

    input_model_file = None
    if options["input_model_file"]:
        input_model_file = options["input_model_file"].replace("\\", "/")

    classification_results = None
    if options["classification_results"]:
        classification_results = options["classification_results"].replace(
            "\\", "/")

    probabilities = flags["p"]

    model_details = None
    if options["model_details"]:
        model_details = options["model_details"].replace("\\", "/")

    raster_segments_map = None
    if options["raster_segments_map"]:
        raster_segments_map = options["raster_segments_map"]

    classified_map = None
    if options["classified_map"]:
        classified_map = options["classified_map"]

    r_script_file = None
    if options["r_script_file"]:
        r_script_file = options["r_script_file"]

    variable_importance_file = None
    if options["variable_importance_file"]:
        variable_importance_file = options["variable_importance_file"].replace(
            "\\", "/")

    accuracy_file = None
    if options["accuracy_file"]:
        accuracy_file = options["accuracy_file"].replace("\\", "/")

    bw_plot_file = None
    if options["bw_plot_file"]:
        bw_plot_file = options["bw_plot_file"].replace("\\", "/")

    if allmap:
        feature_vars = gscript.tempfile().replace("\\", "/")
        gscript.run_command(
            "v.db.select",
            map_=allfeatures,
            file_=feature_vars,
            layer=segments_layer,
            quiet=True,
            overwrite=True,
        )
    else:
        feature_vars = allfeatures.replace("\\", "/")

    if trainmap:
        training_vars = gscript.tempfile().replace("\\", "/")
        gscript.run_command(
            "v.db.select",
            map_=training,
            file_=training_vars,
            layer=training_layer,
            quiet=True,
            overwrite=True,
        )
    else:
        training_vars = training.replace("\\", "/")

    r_commands = gscript.tempfile().replace("\\", "/")

    r_file = open(r_commands, "w")

    if processes > 1:
        install = install_package % ("doParallel", "doParallel", "doParallel")
        r_file.write(install)
        r_file.write("\n")

    # automatic installation of missing R packages
    install = install_package % ("caret", "caret", "caret")
    r_file.write(install)
    r_file.write("\n")
    install = install_package % ("e1071", "e1071", "e1071")
    r_file.write(install)
    r_file.write("\n")
    install = install_package % ("data.table", "data.table", "data.table")
    r_file.write(install)
    r_file.write("\n")
    for classifier in classifiers:
        if classifier in packages:
            for package in packages[classifier]:
                install = install_package % (package, package, package)
                r_file.write(install)
                r_file.write("\n")
    r_file.write("\n")
    r_file.write("library(caret)")
    r_file.write("\n")
    r_file.write("library(data.table)")
    r_file.write("\n")

    if processes > 1:
        r_file.write("library(doParallel)")
        r_file.write("\n")
        r_file.write("registerDoParallel(cores = %d)" % processes)
        r_file.write("\n")

    if not flags["t"]:
        r_file.write(
            "features <- data.frame(fread('%s', sep='%s', header=TRUE, blank.lines.skip=TRUE, showProgress=FALSE), row.names=1)"
            % (feature_vars, separator))
        r_file.write("\n")
        if classcol:
            r_file.write(
                "if('%s' %%in%% names(features)) {features <- subset(features, select=-%s)}"
                % (classcol, classcol))
            r_file.write("\n")

    if input_model_file:
        r_file.write("finalModels <- readRDS('%s')" % input_model_file)
        r_file.write("\n")
        for classifier in classifiers:
            for package in packages[classifier]:
                r_file.write("library(%s)" % package)
                r_file.write("\n")
    else:
        r_file.write(
            "training <- data.frame(fread('%s', sep='%s', header=TRUE, blank.lines.skip=TRUE, showProgress=FALSE), row.names=1)"
            % (training_vars, separator))
        r_file.write("\n")
        # We have to make sure that class variable values start with a letter as
        # they will be used as variables in the probabilities calculation
        r_file.write("origclassnames <- training$%s" % classcol)
        r_file.write("\n")
        r_file.write(
            "training$%s <- as.factor(paste('class', training$%s, sep='_'))" %
            (classcol, classcol))
        r_file.write("\n")
        if tuning_sample_size:
            r_file.write(
                "rndid <- with(training, ave(training[,1], %s, FUN=function(x) {sample.int(length(x))}))"
                % classcol)
            r_file.write("\n")
            r_file.write("tuning_data <- training[rndid<=%s,]" %
                         tuning_sample_size)
            r_file.write("\n")
        else:
            r_file.write("tuning_data <- training")
            r_file.write("\n")
        # If a max_features value is set, then proceed to feature selection.
        # Currently, feature selection uses random forest. TODO: specific feature selection for each classifier.
        if max_features:
            r_file.write(
                "RfeControl <- rfeControl(functions=rfFuncs, method='cv', number=10, returnResamp = 'all')"
            )
            r_file.write("\n")
            r_file.write(
                "RfeResults <- rfe(subset(tuning_data, select=-%s), tuning_data$%s, sizes=c(1:%i), rfeControl=RfeControl)"
                % (classcol, classcol, max_features))
            r_file.write("\n")
            r_file.write("if(length(predictors(RfeResults))>%s)" %
                         max_features)
            r_file.write("\n")
            r_file.write(
                "{if((RfeResults$results$Accuracy[%s+1] - RfeResults$results$Accuracy[%s])/RfeResults$results$Accuracy[%s] < 0.03)"
                % (max_features, max_features, max_features))
            r_file.write("\n")
            r_file.write(
                "{RfeUpdate <- update(RfeResults, subset(tuning_data, select=-%s), tuning_data$%s, size=%s)"
                % (classcol, classcol, max_features))
            r_file.write("\n")
            r_file.write("bestPredictors <- RfeUpdate$bestVar}}")
            r_file.write(" else {")
            r_file.write("\n")
            r_file.write("bestPredictors <- predictors(RfeResults)}")
            r_file.write("\n")
            r_file.write(
                "tuning_data <- tuning_data[,c('%s', bestPredictors)]" %
                classcol)
            r_file.write("\n")
            r_file.write("training <- training[,c('%s', bestPredictors)]" %
                         classcol)
            r_file.write("\n")
            if not flags["t"]:
                r_file.write("features <- features[,bestPredictors]")
                r_file.write("\n")
        if probabilities:
            r_file.write(
                "MyControl.cv <- trainControl(method='repeatedcv', number=%s, repeats=%s, classProbs=TRUE, sampling='down')"
                % (folds, partitions))
        else:
            r_file.write(
                "MyControl.cv <- trainControl(method='repeatedcv', number=%s, repeats=%s, sampling='down')"
                % (folds, partitions))
        r_file.write("\n")
        r_file.write("fmla <- %s ~ ." % classcol)
        r_file.write("\n")
        r_file.write("models.cv <- list()")
        r_file.write("\n")
        r_file.write("finalModels <- list()")
        r_file.write("\n")
        r_file.write("variableImportance <- list()")
        r_file.write("\n")
        if training_sample_size:
            r_file.write(
                "rndid <- with(training, ave(training[,2], %s, FUN=function(x) {sample.int(length(x))}))"
                % classcol)
            r_file.write("\n")
            r_file.write("training_data <- training[rndid<=%s,]" %
                         training_sample_size)
            r_file.write("\n")
        else:
            r_file.write("training_data <- training")
            r_file.write("\n")
        for classifier in classifiers:
            if classifier in tunegrids:
                r_file.write("Grid <- expand.grid(%s)" % tunegrids[classifier])
                r_file.write("\n")
                r_file.write(
                    "%sModel.cv <- train(fmla, tuning_data, method='%s', trControl=MyControl.cv, tuneGrid=Grid"
                    % (classifier, classifier))
            else:
                r_file.write(
                    "%sModel.cv <- train(fmla, tuning_data, method='%s', trControl=MyControl.cv, tuneLength=%s"
                    % (classifier, classifier, tunelength))
            if flags["n"]:
                r_file.write(", preprocess=c('center', 'scale')")
            r_file.write(")")
            r_file.write("\n")
            r_file.write("models.cv$%s <- %sModel.cv" %
                         (classifier, classifier))
            r_file.write("\n")
            r_file.write(
                "finalControl <- trainControl(method = 'none', classProbs = TRUE)"
            )
            r_file.write("\n")

            r_file.write(
                "finalModel <- train(fmla, training_data, method='%s', trControl=finalControl, tuneGrid=%sModel.cv$bestTune"
                % (classifier, classifier))
            if flags["n"]:
                r_file.write(", preprocess=c('center', 'scale')")
            r_file.write(")")
            r_file.write("\n")
            r_file.write("finalModels$%s <- finalModel" % classifier)
            r_file.write("\n")
            r_file.write("variableImportance$%s <- varImp(finalModel)" %
                         classifier)
            r_file.write("\n")
        if len(classifiers) > 1:
            r_file.write("resamps.cv <- resamples(models.cv)")
            r_file.write("\n")
            r_file.write(
                "accuracy_means <- as.vector(apply(resamps.cv$values[seq(2,length(resamps.cv$values), by=2)], 2, mean))"
            )
            r_file.write("\n")
            r_file.write(
                "kappa_means <- as.vector(apply(resamps.cv$values[seq(3,length(resamps.cv$values), by=2)], 2, mean))"
            )
            r_file.write("\n")
        else:
            r_file.write("resamps.cv <- models.cv[[1]]$resample")
            r_file.write("\n")
            r_file.write("accuracy_means <- mean(resamps.cv$Accuracy)")
            r_file.write("\n")
            r_file.write("kappa_means <- mean(resamps.cv$Kappa)")
            r_file.write("\n")

        if output_model_file:
            r_file.write("saveRDS(finalModels, '%s')" % (output_model_file))
            r_file.write("\n")

    if not flags["t"]:
        r_file.write("predicted <- data.frame(predict(finalModels, features))")
        r_file.write("\n")
        # Now erase the 'class_' prefix again in order to get original class values
        r_file.write(
            "predicted <- data.frame(sapply(predicted, function (x) {gsub('class_', '', x)}))"
        )
        r_file.write("\n")
        if probabilities:
            r_file.write(
                "probabilities <- data.frame(predict(finalModels, features, type='prob'))"
            )
            r_file.write("\n")
            r_file.write(
                "colnames(probabilities) <- gsub('.c', '_prob_c', colnames(probabilities))"
            )
            r_file.write("\n")
        r_file.write("ids <- rownames(features)")
        r_file.write("\n")
        # We try to liberate memory space as soon as possible, so erasing non necessary data
        r_file.write("rm(features)")
        r_file.write("\n")
        if flags["i"] or len(classifiers) == 1:
            r_file.write("resultsdf <- data.frame(id=ids, predicted)")
        else:
            r_file.write("resultsdf <- data.frame(id=ids)")
        r_file.write("\n")

        if len(classifiers) > 1:
            r_file.write(voting_function)
            r_file.write("\n")

            if weighting_metric == "kappa":
                r_file.write("weighting_base <- kappa_means")
            else:
                r_file.write("weighting_base <- accuracy_means")
            r_file.write("\n")
            for weighting_mode in weighting_modes:
                r_file.write(weighting_functions[weighting_mode])
                r_file.write("\n")
                r_file.write("weights <- weights / sum(weights)")
                r_file.write("\n")
                r_file.write("vote <- apply(predicted, 1, voting, w=weights)")
                r_file.write("\n")
                r_file.write(
                    "vote <- as.data.frame(matrix(unlist(vote), ncol=2, byrow=TRUE))"
                )
                r_file.write("\n")
                r_file.write("resultsdf$%s_%s <- vote$V1" %
                             (output_classcol, weighting_mode))
                r_file.write("\n")
                r_file.write("resultsdf$%s_%s <- vote$V2" %
                             (output_probcol, weighting_mode))
                r_file.write("\n")

        r_file.write("rm(predicted)")
        r_file.write("\n")

        if allmap and not flags["f"]:
            model_output = gscript.tempfile().replace("\\", "/")
            model_output_csv = model_output + ".csv"
            write_string = "write.csv(resultsdf, '%s'," % model_output_csv
            write_string += " row.names=FALSE, quote=FALSE)"
            r_file.write(write_string)
            r_file.write("\n")

        if classified_map:
            reclass_files = {}
            if len(classifiers) > 1:
                if flags["i"]:
                    for classifier in classifiers:
                        tmpfilename = gscript.tempfile()
                        reclass_files[classifier] = tmpfilename.replace(
                            "\\", "/")
                        r_file.write(
                            "tempdf <- data.frame(resultsdf$id, resultsdf$%s)"
                            % (classifier))
                        r_file.write("\n")
                        r_file.write(
                            "reclass <- data.frame(out=apply(tempdf, 1, function(x) paste(x[1],'=', x[2])))"
                        )
                        r_file.write("\n")
                        r_file.write(
                            "write.table(reclass$out, '%s', col.names=FALSE, row.names=FALSE, quote=FALSE)"
                            % reclass_files[classifier])
                        r_file.write("\n")
                for weighting_mode in weighting_modes:
                    tmpfilename = gscript.tempfile()
                    reclass_files[weighting_mode] = tmpfilename.replace(
                        "\\", "/")
                    r_file.write(
                        "tempdf <- data.frame(resultsdf$id, resultsdf$%s_%s)" %
                        (output_classcol, weighting_mode))
                    r_file.write("\n")
                    r_file.write(
                        "reclass <- data.frame(out=apply(tempdf, 1, function(x) paste(x[1],'=', x[2])))"
                    )
                    r_file.write("\n")
                    r_file.write(
                        "write.table(reclass$out, '%s', col.names=FALSE, row.names=FALSE, quote=FALSE)"
                        % reclass_files[weighting_mode])
                    r_file.write("\n")
            else:
                tmpfilename = gscript.tempfile()
                reclass_files[classifiers[0]] = tmpfilename.replace("\\", "/")
                r_file.write(
                    "reclass <- data.frame(out=apply(resultsdf, 1, function(x) paste(x[1],'=', x[2])))"
                )
                r_file.write("\n")
                r_file.write(
                    "write.table(reclass$out, '%s', col.names=FALSE, row.names=FALSE, quote=FALSE)"
                    % reclass_files[classifiers[0]])
                r_file.write("\n")

        if classification_results:
            if probabilities:
                r_file.write("resultsdf <- cbind(resultsdf, probabilities)")
                r_file.write("\n")
                r_file.write("rm(probabilities)")
                r_file.write("\n")
            r_file.write(
                "write.csv(resultsdf, '%s', row.names=FALSE, quote=FALSE)" %
                classification_results)
            r_file.write("\n")
            r_file.write("rm(resultsdf)")
            r_file.write("\n")
        r_file.write("\n")

    if accuracy_file:
        r_file.write(
            "df_means <- data.frame(method=names(models.cv),accuracy=accuracy_means, kappa=kappa_means)"
        )
        r_file.write("\n")
        r_file.write(
            "write.csv(df_means, '%s', row.names=FALSE, quote=FALSE)" %
            accuracy_file)
        r_file.write("\n")
    if variable_importance_file:
        r_file.write("sink('%s')" % variable_importance_file)
        r_file.write("\n")
        for classifier in classifiers:
            r_file.write("cat('Classifier: %s')" % classifier)
            r_file.write("\n")
            r_file.write("cat('******************************')")
            r_file.write("\n")
            r_file.write(
                "variableImportance$rf$importance[order(variableImportance$rf$importance$Overall, decreasing=TRUE),, drop=FALSE]"
            )
            r_file.write("\n")
        r_file.write("sink()")
        r_file.write("\n")
    if model_details:
        r_file.write("sink('%s')" % model_details)
        r_file.write("\n")
        r_file.write("cat('BEST TUNING VALUES')")
        r_file.write("\n")
        r_file.write("cat('******************************')")
        r_file.write("\n")
        r_file.write("\n")
        r_file.write("lapply(models.cv, function(x) x$best)")
        r_file.write("\n")
        r_file.write("cat('\n\n')")
        r_file.write("\n")
        r_file.write("cat('SUMMARY OF RESAMPLING RESULTS')")
        r_file.write("\n")
        r_file.write("cat('******************************')")
        r_file.write("\n")
        r_file.write("cat('\n\n')")
        r_file.write("\n")
        r_file.write("summary(resamps.cv)")
        r_file.write("\n")
        r_file.write("cat('\n')")
        r_file.write("\n")
        r_file.write("cat('\nRESAMPLED CONFUSION MATRICES')")
        r_file.write("\n")
        r_file.write("cat('******************************')")
        r_file.write("\n")
        r_file.write("cat('\n\n')")
        r_file.write("\n")
        r_file.write(
            "conf.mat.cv <- lapply(models.cv, function(x) confusionMatrix(x))")
        r_file.write("\n")
        r_file.write("print(conf.mat.cv)")
        r_file.write("\n")
        r_file.write("cat('DETAILED CV RESULTS')")
        r_file.write("\n")
        r_file.write("cat('\n\n')")
        r_file.write("\n")
        r_file.write("cat('******************************')")
        r_file.write("\n")
        r_file.write("cat('\n\n')")
        r_file.write("\n")
        r_file.write("lapply(models.cv, function(x) x$results)")
        r_file.write("\n")
        r_file.write("sink()")
        r_file.write("\n")

    if bw_plot_file and len(classifiers) > 1:
        r_file.write("png('%s.png')" % bw_plot_file)
        r_file.write("\n")
        r_file.write("print(bwplot(resamps.cv))")
        r_file.write("\n")
        r_file.write("dev.off()")
        r_file.write("\n")

    r_file.close()

    if r_script_file:
        shutil.copy(r_commands, r_script_file)

    gscript.message("Running R now. Following output is R output.")
    try:
        subprocess.check_call(
            ["Rscript", r_commands],
            stderr=subprocess.STDOUT,
        )
    except subprocess.CalledProcessError:
        gscript.fatal(
            "There was an error in the execution of the R script.\nPlease check the R output."
        )

    gscript.message("Finished running R.")

    if allmap and not flags["f"]:

        model_output_csvt = model_output + ".csvt"
        temptable = "classif_tmp_table_%d" % os.getpid()

        f = open(model_output_csvt, "w")
        header_string = '"Integer"'
        if flags["i"]:
            for classifier in classifiers:
                header_string += ',"Integer"'
        if len(classifiers) > 1:
            for weighting_mode in weighting_modes:
                header_string += ',"Integer"'
                header_string += ',"Real"'
        else:
            header_string += ',"Integer"'

        f.write(header_string)
        f.close()

        gscript.message("Loading results into attribute table")
        gscript.run_command(
            "db.in.ogr",
            input_=model_output_csv,
            output=temptable,
            overwrite=True,
            quiet=True,
        )
        index_creation = "CREATE INDEX idx_%s_cat" % temptable
        index_creation += " ON %s (id)" % temptable
        gscript.run_command("db.execute", sql=index_creation, quiet=True)
        columns = gscript.read_command("db.columns",
                                       table=temptable).splitlines()[1:]
        orig_cat = gscript.vector_db(allfeatures)[int(segments_layer)]["key"]
        gscript.run_command(
            "v.db.join",
            map_=allfeatures,
            column=orig_cat,
            otable=temptable,
            ocolumn="id",
            subset_columns=columns,
            quiet=True,
        )

    if classified_map:
        for classification, reclass_file in reclass_files.items():
            output_map = classified_map + "_" + classification
            gscript.run_command(
                "r.reclass",
                input=raster_segments_map,
                output=output_map,
                rules=reclass_file,
                quiet=True,
            )
示例#38
0
def main():
    global tmp, nuldev
    nuldev = file(os.devnull, 'w')

    ## setup temporary files
    tmp = grass.tempfile()
    tmpf = 'v_net_neighbors'

    inmap = options['input']
    outfile = options['dump']

    # check if input file exists
    if not grass.find_file(inmap, element = 'vector')['file']:
        grass.fatal(_("<%s> does not exist.") % inmap)

    # check for table in net vector map
    try:
        f = grass.vector_db(inmap)[2]
    except KeyError:
        grass.run_command('v.db.addtable', _map = inmap, layer = 2,
                          quiet = True, stderr = nuldev)
        
    ## extract temp nodes and lines
    nodes = tmpf + '_nodes'
    lines = tmpf + '_lines'

    iflines = grass.vector_info_topo(inmap)['lines']
    ifbounds = grass.vector_info_topo(inmap)['boundaries']
    if iflines != 0:
        vect_type = 'line'
    if ifbounds != 0:
        vect_type = 'boundary'
    
    if iflines != 0 and ifbounds != 0:
        grass.fatal(_("Input net vector map must have lines OR boundaries, not both"))

    grass.run_command('v.extract', input = inmap, output = nodes, layer = 2, 
                      _type = 'point', flags = 't', quiet = True, stderr = nuldev)
    grass.run_command('v.extract', input = inmap, output = lines,  
                      _type = vect_type, flags = 't', quiet = True, stderr = nuldev)

    ## filter nodes on line intersections if with '-i' flag 
    if flags['i']:
        p = grass.pipe_command('v.net', _input = inmap, operation = 'nreport',
                               quiet = True, stderr = nuldev)
        c  = p.communicate()[0].strip().split('\n')
        filt = [elem for elem in c if ',' in elem]

        fnodes = []
        
        for x in filt:
            spl = x.split(' ')
            fnodes.append(spl[0])

        fnodes_str = ','.join(fnodes)
        
        nsel = tmpf + '_nsel'
        grass.run_command('v.extract', _input = nodes, _list = fnodes_str, 
                          output = nsel, layer = 2, _type = 'point',
                          quiet = True, stderr = nuldev)

        grass.run_command('g.rename', vect = (nsel,nodes), overwrite = True, 
                          quiet = True, stderr = nuldev)
        
        
    if not flags['p']:
        grass.run_command('v.db.addcol', _map = inmap, layer = 2, 
                          columns = 'neigh_node varchar(255)', 
                          quiet = True, stderr = nuldev)

    ## main cycle (extract every node and make 2 selects)
    out_dict = {}

    p = grass.pipe_command('v.category', _input = nodes, opt = 'print', layer = 2, 
                           _type = 'point', quiet = True, stderr = nuldev)
    c = p.communicate()[0].strip().split('\n')
    for cat in c:
        icat = int(cat)

        nnode = nodes + cat
        grass.run_command('v.extract', _input = nodes, _list = cat, 
                          output = nnode, layer = 2, _type = 'point',
                          flags = 't', quiet = True, stderr = nuldev)

        linode = nnode + '_lines'
        grass.run_command('v.select', ain = lines, _bin = nnode, 
                          blayer = 2, output = linode, 
                          operator = 'overlap', quiet = True, stderr = nuldev)
        
        linode_pts = linode + '_pts'
        grass.run_command('v.select', ain = nodes, alayer = 2, 
                          _bin = linode, output = linode_pts, 
                          operator = 'overlap', quiet = True, stderr = nuldev)
        
        pcat = grass.pipe_command('v.category', _input = linode_pts, layer = 2,
                                  opt = 'print', quiet = True, stderr = nuldev)
        ccat = pcat.communicate()[0].strip().split('\n')
        ccat.remove(cat)
        
        ncat_list = []
        ncat_list.append(ccat)

        str_db = ','.join(map(str, ncat_list))
        str_db1 = str_db.replace("[", "").replace("]", "").replace("'", "").replace(" ", "")

        out_dict[icat] = str_db1
        
        if not flags['p']:
            grass.run_command('v.db.update', _map = inmap, layer = 2, 
                              column = 'neigh_node', value = '%s' % (str_db1), 
                              where = "cat = %d" % (icat), 
                              quiet = True, stderr = nuldev )
        
    ## output to stdout / file
    tmp3 = tmp + '.out'
    outf2 = file(tmp3, 'w')
    for key in sorted(out_dict.iterkeys()):
        val = out_dict[key]
        print >> outf2, "%s %s" % (key,val)
    outf2.close()
    
    if flags['p']:
        with open(tmp3, 'rb') as f:
            print f.read()

    if outfile:
        shutil.copyfile(tmp3, outfile)
                

    return 0
示例#39
0
def main():
    mapname = options["map"]
    layer = options["layer"]
    option = options["option"]
    units = options["units"]
    sort = options["sort"]
    fs = separator(options["separator"])

    nuldev = open(os.devnull, "w")

    if not grass.find_file(mapname, "vector")["file"]:
        grass.fatal(_("Vector map <%s> not found") % mapname)

    if int(layer) in grass.vector_db(mapname):
        colnames = grass.vector_columns(mapname,
                                        layer,
                                        getDict=False,
                                        stderr=nuldev)
        isConnection = True
    else:
        isConnection = False
        colnames = ["cat"]

    if option == "coor":
        extracolnames = ["x", "y", "z"]
    else:
        extracolnames = [option]

    if units == "percent":
        unitsp = "meters"
    elif units:
        unitsp = units
    else:
        unitsp = None

    # NOTE: we suppress -1 cat and 0 cat
    if isConnection:
        f = grass.vector_db(map=mapname)[int(layer)]
        p = grass.pipe_command("v.db.select",
                               flags="e",
                               quiet=True,
                               map=mapname,
                               layer=layer)
        records1 = []
        catcol = -1
        ncols = 0
        for line in p.stdout:
            cols = decode(line).rstrip("\r\n").split("|")
            if catcol == -1:
                ncols = len(cols)
                for i in range(0, ncols):
                    if cols[i] == f["key"]:
                        catcol = i
                        break
                if catcol == -1:
                    grass.fatal(
                        _("There is a table connected to input vector map '%s', but "
                          "there is no key column '%s'.") %
                        (mapname, f["key"]))
                continue
            if cols[catcol] == "-1" or cols[catcol] == "0":
                continue
            records1.append(cols[:catcol] + [int(cols[catcol])] +
                            cols[(catcol + 1):])
        p.wait()
        if p.returncode != 0:
            sys.exit(1)

        records1.sort(key=lambda r: r[catcol])

        if len(records1) == 0:
            try:
                grass.fatal(
                    _("There is a table connected to input vector map '%s', but "
                      "there are no categories present in the key column '%s'. Consider using "
                      "v.to.db to correct this.") % (mapname, f["key"]))
            except KeyError:
                pass

        # fetch the requested attribute sorted by cat:
        p = grass.pipe_command(
            "v.to.db",
            flags="p",
            quiet=True,
            map=mapname,
            option=option,
            layer=layer,
            units=unitsp,
        )
        records2 = []
        for line in p.stdout:
            fields = decode(line).rstrip("\r\n").split("|")
            if fields[0] in ["cat", "-1", "0"]:
                continue
            records2.append([int(fields[0])] + fields[1:])
        p.wait()
        records2.sort()

        # make pre-table
        # len(records1) may not be the same as len(records2) because
        # v.db.select can return attributes that are not linked to features.
        records3 = []
        for r2 in records2:
            rec = list(filter(lambda r1: r1[catcol] == r2[0], records1))
            if len(rec) > 0:
                res = rec[0] + r2[1:]
            elif flags["d"]:
                res = [r2[0]] + [""] * (ncols - 1) + r2[1:]
            else:
                continue
            records3.append(res)
    else:
        catcol = 0
        records1 = []
        p = grass.pipe_command("v.category",
                               inp=mapname,
                               layer=layer,
                               option="print")
        for line in p.stdout:
            field = int(decode(line).rstrip())
            if field > 0:
                records1.append(field)
        p.wait()
        records1.sort()
        records1 = uniq(records1)

        # make pre-table
        p = grass.pipe_command(
            "v.to.db",
            flags="p",
            quiet=True,
            map=mapname,
            option=option,
            layer=layer,
            units=unitsp,
        )
        records3 = []
        for line in p.stdout:
            fields = decode(line).rstrip("\r\n").split("|")
            if fields[0] in ["cat", "-1", "0"]:
                continue
            records3.append([int(fields[0])] + fields[1:])
        p.wait()
        records3.sort()

    # print table header
    if not flags["c"]:
        sys.stdout.write(fs.join(colnames + extracolnames) + "\n")

    # make and print the table:
    numcols = len(colnames) + len(extracolnames)

    # calculate percents if requested
    if units == "percent" and option != "coor":
        # calculate total value
        total = 0
        for r in records3:
            total += float(r[-1])

        # calculate percentages
        records4 = [float(r[-1]) * 100 / total for r in records3]
        if type(records1[0]) == int:
            records3 = [[r1] + [r4] for r1, r4 in zip(records1, records4)]
        else:
            records3 = [r1 + [r4] for r1, r4 in zip(records1, records4)]

    # sort results
    if sort:
        if sort == "asc":
            if option == "coor":
                records3.sort(
                    key=lambda r: (float(r[-3]), float(r[-2]), float(r[-1])))
            else:
                records3.sort(key=lambda r: float(r[-1]))
        else:
            if option == "coor":
                records3.sort(
                    key=lambda r: (float(r[-3]), float(r[-2]), float(r[-1])),
                    reverse=True,
                )
            else:
                records3.sort(key=lambda r: float(r[-1]), reverse=True)

    for r in records3:
        sys.stdout.write(fs.join(map(str, r)) + "\n")
def worker(params, nace_queue, output_queue):
    try:
        pid = os.getpid()
        if params['processes'] > 1:
            params['trade_matrix_employment_file'] = params['trade_matrix_employment_file'] % pid
        for nace in iter(nace_queue.get, 'STOP'):

            bigtic = timeit.default_timer()
            #gscript.info("Working on NACE %s" % nace)
            print("Working on NACE %s" % nace)

            try:
                gamma = params['gammas_nace'][nace]
                gscript.verbose("Gamma of NACE %s = %f" % (nace, gamma))
            except:
                gscript.message("No gamma value found for NACE %s" % nace)
                output_queue.put([nace, None])
                continue

            # If we use the version where firms are grouped by pixel,
            # we have to identify each map by NACE code
            if params['firms_grouped_by_pixel']:
                point_map = params['pointmap'] + nace
            else:
                point_map = params['pointmap']

            # Get total volume of this NACE 5 and scale the NACE 2 consumption map to that total volume.
            # Do not take into account the share of the production that is exported outside the country
            # or invested into capital accumulation
            if params['firms_grouped_by_pixel']:
                total_volume = float(gscript.read_command('v.db.select',
                                                         flags='c',
                                                         map_=point_map,
                                                         column="sum(%s)" % params['volume_measure'],
                                                         quiet=True).rstrip())
            else:
                total_volume = float(gscript.read_command('v.db.select',
                                                         flags='c',
                                                         map_=point_map,
                                                         column="sum(%s)" % params['volume_measure'],
                                                         where="%s='%s'" % (params['nace_info_column'], nace),
                                                         quiet=True).rstrip())

            total_export = total_volume * params['export_investment_shares'][nace[:params['exp_inv_nace_precision']]]['export']
            total_investment = total_volume * params['export_investment_shares'][nace[:params['exp_inv_nace_precision']]]['investment']
            total_volume = total_volume - total_export - total_investment

            # Should we use a specific consumption 'population' map per NACE
            # or one general map for all (e.g. simple population)
            if params['use_per_nace_pop']:
                dest_map_unscaled = params['destination_map'] + nace[:params['dest_map_nace_precision']]
            else:
                dest_map_unscaled = params['destination_map']

    	    dest_map = "gravity_io_tmp_scaled_pop_%d" % pid


            map_stats = gscript.parse_command('r.univar',
                                              flags = "g",
                                              map = dest_map_unscaled)

            mapcalc_expression = "%s = " % dest_map
    	    mapcalc_expression += "%s * " % dest_map_unscaled
            mapcalc_expression += "float(%s) / %s" % (total_volume, map_stats['sum'])
            gscript.run_command('r.mapcalc',
                                expression=mapcalc_expression,
                                overwrite=True,
                                quiet=True)

            if DEBUG:
                unscaled_sum = map_stats['sum']
                map_stats = gscript.parse_command('r.univar',
                                                  flags = "g",
                                                  map = dest_map)
                print("total production employment: %d, sum of unscaled NACE 2 consumption map: %s, sum of scaled consumption map: %.3f" % (total_volume, unscaled_sum, float(map_stats['sum'])))


            # Now get the data firm by firm (or pixel by pixel)
            query = "SELECT cat, x, y, %s, %s" % (params['volume_measure'], params['spatial_unit_name'])
            query += " FROM %s" % point_map
            query += " WHERE %s>0" % params['volume_measure']
            if not params['firms_grouped_by_pixel']:
                query += " AND %s='%s'" % (params['nace_info_column'], nace)

            database = gscript.vector_db(point_map)[1]['database']
            firms_data = gscript.read_command('db.select',
                                        flags = 'c',
                                        sql = query,
                                        database = database)
            
            if len(firms_data) == 0:
                continue

            # We assume that in sectors with more than 5 firms,
            # only firms that have a volume above a certain
            # percentile of volumes in the sector actually export.
            # Calculate volume threshold value, sum total volume of above
            # threshold firms, and estimate export share for those firms as
            # total_export / total volume of firms above threshold volume
            # as long as total_export + investment of those firms is not higher
            # than their total volume (i.e. we assume that investment share is equal
            # across all sizes of firms).
            # If necessary we reduce the threshold value by a step of 0.1 until we have
            # sufficient volume.

            if len(firms_data.splitlines()) > 5:
                volumes=[]
                for firm in firms_data.splitlines():
                    volume = float(firm.split('|')[3])
                    volumes.append(volume)

                if max(volumes) == min(volumes):
                    volume_threshold = 0
                    export_share = params['export_investment_shares'][nace[:params['exp_inv_nace_precision']]]['export']
                else:
                    volumes.sort()
                    volume_threshold = percentile(volumes, params['volume_perc'])
                    export_firms_total_volume = sum([v for v in volumes if v > volume_threshold])

                    thresh_percentile = params['volume_perc']
                    inv_share = params['export_investment_shares'][nace[:params['exp_inv_nace_precision']]]['investment']

                    while export_firms_total_volume < (export_firms_total_volume * inv_share + total_export):
                        thresh_percentile -= 0.1
                        volume_threshold = percentile(volumes, thresh_percentile)
                        export_firms_total_volume = sum([v for v in volumes if v > volume_threshold])

                    export_share = total_export / export_firms_total_volume
            else:
                volume_threshold = 0
                export_share = params['export_investment_shares'][nace[:params['exp_inv_nace_precision']]]['export']

            if DEBUG:
                print("volume threshold: %f" % volume_threshold)
                print("export share: %f" % export_share)

            firm_cats = []
            firm_spatial_units = []
            firm_volumes = []
            firm_exports = []
            firm_investments = []

            gscript.verbose("Calculating distance maps for NACE %s" % nace)
            tempdist = 'gravity_io_tmp_dist_%s_%d' % (nace, pid)

            # Starting the first loop to get data firm by firm (or pixel by
            # pixel) and to calculate distance maps and their derivatives
            # which stay constant over the entire estimation
            tic = timeit.default_timer()
            for firm in firms_data.splitlines():
                cat = int(firm.split('|')[0])
                firm_cats.append(cat)
                x = firm.split('|')[1]
                y = firm.split('|')[2]
                spatial_unit = firm.split('|')[4]
                firm_spatial_units.append(spatial_unit)
                volume = float(firm.split('|')[3])
                if volume > volume_threshold:
                    export = volume * export_share
                    firm_exports.append(export)
                else:
                    export = 0
                    firm_exports.append(export)
                investment = volume * params['export_investment_shares'][nace[:params['exp_inv_nace_precision']]]['investment']
                firm_investments.append(investment)
                volume = volume - export - investment
                firm_volumes.append(volume)
            
                if gamma > 0:
                    # Calculate distance weighted firm rate for each pixel
                    # If distance is 0, use fixed internal distance (parameter)

                    mapcalc_expression = "eval( "
                    mapcalc_expression += "tmpdist = sqrt((x()-%s)^2 + (y()-%s)^2))\n" % (x, y)
                    mapcalc_expression += "%s = if(tmpdist < %f, %f, tmpdist)" % (tempdist, params['internal_distance'], params['internal_distance'])
                    gscript.run_command('r.mapcalc',
                                        expression=mapcalc_expression,
                                        overwrite=True,
                                        quiet=True)

                    # Now create a map with the inverse distance exposed to gamma

                    tempdistexp = 'gravity_io_tmp_dist_exp_%s_%d_%d' % (nace, cat, pid)
                    mapcalc_expression = "%s = " % tempdistexp
                    mapcalc_expression += "1.0 / (%s ^ %f)" % (tempdist, gamma)
                    gscript.run_command('r.mapcalc',
                                        expression=mapcalc_expression,
                                        overwrite=True,
                                        quiet=True)


            del firms_data

            toc = timeit.default_timer()
            gscript.verbose("NACE %s: Firms: %d, Data preparation time: %f" % (nace, len(firm_cats), toc-tic))

            if gamma > 0:
                # Now we start the second loop over firms/pixels which will allow
                # estimating the A and B coefficients in the doubly-constrained
                # gravity model. We iterate to approach the values of these
                # coefficients and stop when we reach either a minimum change
                # threshold (checked against the mean of the changes of the B
                # coefficient which is defined pixel by pixel) or a maximum number of iterations

                firm_rate_map = "firm_rate_%d" % (pid)
            
                # A and B are the constraining factors in the
                # doubly-constrained gravity model.
                # As each depends on the other, We set each coefficient to 1 at the start
                # and then iterate over the respective adaptations until either the difference
                # falls below a defined threshold, or we reach the maximum number of iterations.

                A = [9999]*len(firm_cats)
                Anew = [1]*len(firm_cats)
                diffA = [None]*len(firm_cats)
                indA = 'gravity_io_tmp_indA_%d' % pid
                B = 'gravity_io_tmp_B_%d' % pid
                newB = 'gravity_io_tmp_newB_%d' % pid
                mapcalc_expression = "%s = 1" % B
                gscript.run_command('r.mapcalc',
                                   expression=mapcalc_expression,
                                   overwrite=True,
                                   quiet=True)
                diffmap = 'gravity_io_tmp_diffmap_%d' %pid
                total_A_diff = 9999
                total_B_diff = 9999
                sum_rates = 'sum_rates_%d' % pid
                temp_sum_rates = 'gravity_io_tmp_sum_rates_%d' % pid

                iterations = 0
                tic = timeit.default_timer()

                gscript.verbose("Launching constraint calculation for NACE %s" % nace)

                while (total_A_diff > params['constraint_calculation_threshold'] or total_B_diff > params['constraint_calculation_threshold']) and iterations < params['constraint_calculation_max_iter']:

                    iterations += 1
                    ticiter = timeit.default_timer()

                    mapcalc_expression = "%s = 0" % sum_rates
                    gscript.run_command('r.mapcalc',
                                        expression=mapcalc_expression,
                                        overwrite=True,
                                        quiet=True)

                    for i in range(len(firm_cats)):

                        cat = firm_cats[i]

                        tempdistexp = 'gravity_io_tmp_dist_exp_%s_%s_%d' % (nace, cat, pid)

                        mapcalc_expression = "%s = %s * %s * %s" % (indA, B, dest_map, tempdistexp) 
                        gscript.run_command('r.mapcalc',
                                   expression=mapcalc_expression,
                                   overwrite=True,
                                   quiet=True)

                        map_stats = gscript.parse_command('r.univar',
                                                          flags = 'g',
                                                          map_ = indA,
                                                          quiet=True)

                        Anew[i] = (1.0 / float(map_stats['sum']))
                        diffA[i] = float(abs(A[i]-Anew[i]))/A[i]


                        mapcalc_expression = "%s = %s + %.10f * %.10f * %s\n" % (temp_sum_rates, sum_rates, Anew[i], firm_volumes[i], tempdistexp)
                        gscript.run_command('r.mapcalc',
                                            expression=mapcalc_expression,
                                            overwrite=True,
                                            quiet=True)


                        gscript.run_command('g.rename',
                                            raster=[temp_sum_rates,sum_rates],
                                            overwrite=True,
                                            quiet=True)


                    A = list(Anew)


                    mapcalc_expression = "%s = 1.0 / %s" % (newB, sum_rates)
                    gscript.run_command('r.mapcalc',
                               expression=mapcalc_expression,
                               overwrite=True,
                               quiet=True)


                    mapcalc_expression = "%s = float(abs(%s - %s))/%s" % (diffmap, B, newB, B)
                    gscript.run_command('r.mapcalc',
                               expression=mapcalc_expression,
                               overwrite=True,
                               quiet=True)

                    map_stats = gscript.parse_command('r.univar',
                                                      flags = 'g',
                                                      map_ = diffmap,
                                                      quiet=True)

                    total_B_diff = float(map_stats['sum'])
                    mean_B_diff = float(map_stats['mean'])
                    total_A_diff = sum(diffA)
                    mean_A_diff = total_A_diff/len(diffA)

                    if DEBUG:
                        map_stats = gscript.parse_command('r.univar',
                                                          flags = 'g',
                                                          map_ = newB,
                                                          quiet=True)
                        meanB = float(map_stats['mean'])
                        meanA = float(sum(A))/len(A)
                        print("\nIteration: %d" % iterations)
                        print("mean A: %g, mean B: %g" % (meanA, meanB))
                        print("total A diff : %g, mean A diff: %g, total B diff : %g, mean B diff: %g" % (total_A_diff, mean_A_diff, total_B_diff, mean_B_diff))

                    gscript.run_command('g.rename',
                                        raster=[newB,B],
                                        overwrite=True,
                                        quiet=True)

                    tociter = timeit.default_timer()
                    if DEBUG:
                        print("Time for iteration %d : %f seconds" % (iterations, tociter-ticiter))

                if params['remove_tmps']:

                    gscript.run_command('g.remove',
                                       type_ = 'raster',
                                       name = sum_rates,
                                       flags = 'f',
                                       quiet = True)

                toc = timeit.default_timer()
                gscript.verbose("Finished constraint calculations for NACE %s in %f seconds" % (nace, toc-tic))
                if iterations == params['constraint_calculation_max_iter']:
                    gscript.warning("Too many iterations for NACE %s ! total_A_diff = %g, total_B_diff = %g" % (nace, total_A_diff, total_B_diff))

            gscript.verbose("Calculating trade matrix for NACE %s" % nace)
            tic = timeit.default_timer()

            # Now that we have values for A and B we apply them in the
            # doubly-constrained gravity formula to estimate the trade flows

            spatial_units_trade_matrix_employment = {}
            firm_matrix_map = 'firm_matrix_%d' % pid
            for i in range(len(firm_cats)):
               
                cat = firm_cats[i]
 
                if gamma > 0:
                    # When gamma is > 0 apply constrained gravity formula
                    tempdistexp = 'gravity_io_tmp_dist_exp_%s_%s_%d' % (nace, cat, pid)
                    mapcalc_expression = "%s = %e * %s * %s * %s * %s" % (firm_matrix_map, A[i], B, firm_volumes[i], dest_map, tempdistexp) 

                else:
                    # When gamma = 0, distance plays no role and we just distribute 
                    # the production of the firm to all pixels according to their 
                    # share in consumtion population
                    map_stats = gscript.parse_command('r.univar',
                                                      flags = "g",
                                                      map = dest_map)
                    mapcalc_expression = "%s = %s * (float(%s)/%s)" % (firm_matrix_map, firm_volumes[i], dest_map, map_stats['sum'])

                gscript.run_command('r.mapcalc',
                                   expression = mapcalc_expression,
                                   overwrite = True,
                                   quiet=True)

                map_stats = gscript.parse_command('r.univar',
                                                  flags = "g",
                                                  map = firm_matrix_map)

                spatial_unit = firm_spatial_units[i]
                sum_pop = float(map_stats['sum'])

                # Aggregate the export employment by pixel to given spatial units
                map_stats = gscript.read_command('r.univar',
                                                 flags="t",
                                                 map=firm_matrix_map,
                                                 zones=params['spatial_units_rast'],
                                                 separator='pipe',
                                                 quiet=True)

                firm_trade_matrix = {}
                first = True
                for line in map_stats.splitlines():
                    if first:
                        first = False
                        continue
                    data = line.split('|')
                    firm_trade_matrix[data[0]] = float(data[12])

                # We add the data of the firm to the spatial unit the firm is
                # located in
                if spatial_unit in spatial_units_trade_matrix_employment:
                    spatial_units_trade_matrix_employment[spatial_unit]['export'] += firm_exports[i]
                    spatial_units_trade_matrix_employment[spatial_unit]['investment'] += firm_investments[i]
                    for target_unit in firm_trade_matrix:
                        if target_unit in spatial_units_trade_matrix_employment[spatial_unit]:
                            spatial_units_trade_matrix_employment[spatial_unit][target_unit] += ( firm_trade_matrix[target_unit] / sum_pop ) * float(firm_volumes[i])
                        else:
                            spatial_units_trade_matrix_employment[spatial_unit][target_unit] = ( firm_trade_matrix[target_unit] / sum_pop ) * float(firm_volumes[i])
                else:
                    spatial_units_trade_matrix_employment[spatial_unit] = {}
                    spatial_units_trade_matrix_employment[spatial_unit]['export'] = firm_exports[i]
                    spatial_units_trade_matrix_employment[spatial_unit]['investment'] = firm_investments[i]
                    for target_unit in firm_trade_matrix:
                        spatial_units_trade_matrix_employment[spatial_unit][target_unit] = ( firm_trade_matrix[target_unit] / sum_pop ) * float(firm_volumes[i])

            toc = timeit.default_timer()
            gscript.verbose("Finished calculating trade matrix for NACE %s in %f seconds" % (nace, toc-tic))
                            
            if DEBUG:
                gisdbase = gscript.gisenv()['GISDBASE']
                du = subprocess.Popen(["du", "-sh", gisdbase], stdout=subprocess.PIPE)
                du_output=du.communicate()[0].rstrip()
                gscript.warning("NACE: %s, Disk usage: %s" % (nace, du_output))

            # Now remove the large number of temporary maps created during the
            # process
            if params['remove_tmps']:

                gscript.run_command('g.remove',
                                   type_ = 'raster',
                                   name = firm_matrix_map,
                                   flags = 'f',
                                   quiet = True)

                gscript.run_command('g.remove',
                                   type_ = 'raster',
                                   name = dest_map,
                                   flags = 'f',
                                   quiet = True)

                if gamma > 0:
                    gscript.run_command('g.remove',
                                       type_ = 'raster',
                                       pattern = 'gravity_io_tmp_*_%d' % pid,
                                       flags = 'f',
                                       quiet = True)


            gscript.verbose('Writing results to files')

            # In order to avoid race conditions when writing to the output file,
            # each parallel process gets its own file to write to.
            # These files need to be merged after the model run.
            with open(params['trade_matrix_employment_file'], 'a') as f:
                for orig_unit in spatial_units_trade_matrix_employment:
                    for dest_unit in spatial_units_trade_matrix_employment[orig_unit]:
                        output_string = nace + ';'
                        output_string += orig_unit + ';'
                        output_string += dest_unit + ';'
                        output_string += str(spatial_units_trade_matrix_employment[orig_unit][dest_unit]) + '\n'
                        f.write(output_string)

            bigtoc = timeit.default_timer()
            gscript.info("Finished with NACE %s in %f seconds" % (nace, bigtoc-bigtic))

            output_queue.put([nace, 'OK'])


    except:
        gscript.warning("Error in worker script:")
        raise
        

    return True
示例#41
0
def main():
    mapname = options['map']
    option = options['option']
    layer = options['layer']
    units = options['units']

    nuldev = open(os.devnull, 'w')

    if not grass.find_file(mapname, 'vector')['file']:
        grass.fatal(_("Vector map <%s> not found") % mapname)

    if int(layer) in grass.vector_db(mapname):
        colnames = grass.vector_columns(mapname,
                                        layer,
                                        getDict=False,
                                        stderr=nuldev)
        isConnection = True
    else:
        isConnection = False
        colnames = ['cat']

    if option == 'coor':
        extracolnames = ['x', 'y', 'z']
    else:
        extracolnames = [option]

    if units == 'percent':
        unitsp = 'meters'
    elif units:
        unitsp = units
    else:
        unitsp = None

    # NOTE: we suppress -1 cat and 0 cat
    if isConnection:
        f = grass.vector_db(map=mapname)[int(layer)]
        p = grass.pipe_command('v.db.select',
                               quiet=True,
                               map=mapname,
                               layer=layer)
        records1 = []
        catcol = -1
        ncols = 0
        for line in p.stdout:
            cols = decode(line).rstrip('\r\n').split('|')
            if catcol == -1:
                ncols = len(cols)
                for i in range(0, ncols):
                    if cols[i] == f['key']:
                        catcol = i
                        break
                if catcol == -1:
                    grass.fatal(
                        _("There is a table connected to input vector map '%s', but "
                          "there is no key column '%s'.") %
                        (mapname, f['key']))
                continue
            if cols[catcol] == '-1' or cols[catcol] == '0':
                continue
            records1.append(cols[:catcol] + [int(cols[catcol])] +
                            cols[(catcol + 1):])
        p.wait()
        if p.returncode != 0:
            sys.exit(1)

        records1.sort(key=lambda r: r[catcol])

        if len(records1) == 0:
            try:
                grass.fatal(
                    _("There is a table connected to input vector map '%s', but "
                      "there are no categories present in the key column '%s'. Consider using "
                      "v.to.db to correct this.") % (mapname, f['key']))
            except KeyError:
                pass

        # fetch the requested attribute sorted by cat:
        p = grass.pipe_command('v.to.db',
                               flags='p',
                               quiet=True,
                               map=mapname,
                               option=option,
                               layer=layer,
                               units=unitsp)
        records2 = []
        for line in p.stdout:
            fields = decode(line).rstrip('\r\n').split('|')
            if fields[0] in ['cat', '-1', '0']:
                continue
            records2.append([int(fields[0])] + fields[1:])
        p.wait()
        records2.sort()

        # make pre-table
        # len(records1) may not be the same as len(records2) because
        # v.db.select can return attributes that are not linked to features.
        records3 = []
        for r2 in records2:
            rec = list(filter(lambda r1: r1[catcol] == r2[0], records1))
            if len(rec) > 0:
                res = rec[0] + r2[1:]
            elif flags['d']:
                res = [r2[0]] + [''] * (ncols - 1) + r2[1:]
            else:
                continue
            records3.append(res)
    else:
        catcol = 0
        records1 = []
        p = grass.pipe_command('v.category',
                               inp=mapname,
                               layer=layer,
                               option='print')
        for line in p.stdout:
            field = int(decode(line).rstrip())
            if field > 0:
                records1.append(field)
        p.wait()
        records1.sort()
        records1 = uniq(records1)

        # make pre-table
        p = grass.pipe_command('v.to.db',
                               flags='p',
                               quiet=True,
                               map=mapname,
                               option=option,
                               layer=layer,
                               units=unitsp)
        records3 = []
        for line in p.stdout:
            fields = decode(line).rstrip('\r\n').split('|')
            if fields[0] in ['cat', '-1', '0']:
                continue
            records3.append([int(fields[0])] + fields[1:])
        p.wait()
        records3.sort()

    # print table header
    if not flags['c']:
        sys.stdout.write('|'.join(colnames + extracolnames) + '\n')

    # make and print the table:
    numcols = len(colnames) + len(extracolnames)

    # calculate percents if requested
    if units == 'percent' and option != 'coor':
        # calculate total value
        total = 0
        for r in records3:
            total += float(r[-1])

        # calculate percentages
        records4 = [float(r[-1]) * 100 / total for r in records3]
        if type(records1[0]) == int:
            records3 = [[r1] + [r4] for r1, r4 in zip(records1, records4)]
        else:
            records3 = [r1 + [r4] for r1, r4 in zip(records1, records4)]

    # sort results
    if options['sort']:
        if options['sort'] == 'asc':
            if options['option'] == 'coor':
                records3.sort(
                    key=lambda r: (float(r[-3]), float(r[-2]), float(r[-1])))
            else:
                records3.sort(key=lambda r: float(r[-1]))
        else:
            if options['option'] == 'coor':
                records3.sort(key=lambda r:
                              (float(r[-3]), float(r[-2]), float(r[-1])),
                              reverse=True)
            else:
                records3.sort(key=lambda r: float(r[-1]), reverse=True)

    for r in records3:
        sys.stdout.write('|'.join(map(str, r)) + '\n')
    consumption_dict = create_consumption_dict('../io_shares.csv')

    tempmap = 'temp_tempmap_%d' % os.getpid()

    for nace2 in gscript.read_command('v.db.select',
                                      map=firms_map,
                                      column="substr(%s, 1, 2)" % nace_column,
                                      group="substr(%s, 1, 2)" % nace_column,
                                      where="%s <> '' AND %s > 0" %
                                      (nace_column, volume_column),
                                      flags='c',
                                      quiet=True).splitlines():
        print nace2
        sql = "SELECT sum(%s) FROM %s" % (volume_column, firms_map)
        sql += " where substr(%s, 1, 2) = '%s'" % (nace_column, nace2)
        db = gscript.vector_db(firms_map)[1]['database']
        total_volume = float(
            gscript.read_command('db.select',
                                 sql=sql,
                                 database=db,
                                 flags='c',
                                 quiet=True).rstrip())

        export_volume = float(
            consumption_dict[nace2].pop('Export')) * total_volume
        investment_volume = float(
            consumption_dict[nace2].pop('Investment')) * total_volume
        local_volume = total_volume - export_volume - investment_volume
        gscript.message("nace: %s, total: %f, local: %f" %
                        (nace2, total_volume, local_volume))
        final_cons_volume = float(
示例#43
0
def main():

    global allmap
    global trainmap
    global feature_vars
    global training_vars
    global model_output_csv
    global model_output_csvt
    global temptable
    global r_commands
    global reclass_files

    allmap = trainmap = feature_vars = training_vars = None
    model_output_csv = model_output_csvt = temptable = r_commands = None
    reclass_files = None

    voting_function = "voting <- function (x, w) {\n"
    voting_function += "res <- tapply(w, x, sum, simplify = TRUE)\n"
    voting_function += "maj_class <- as.numeric(names(res)[which.max(res)])\n"
    voting_function += "prob <- as.numeric(res[which.max(res)])\n"
    voting_function += "return(list(maj_class=maj_class, prob=prob))\n}"

    weighting_functions = {}
    weighting_functions[
        'smv'] = "weights <- rep(1/length(weighting_base), length(weighting_base))"
    weighting_functions[
        'swv'] = "weights <- weighting_base/sum(weighting_base)"
    weighting_functions[
        'bwwv'] = "weights <- 1-(max(weighting_base) - weighting_base)/(max(weighting_base) - min(weighting_base))"
    weighting_functions[
        'qbwwv'] = "weights <- ((min(weighting_base) - weighting_base)/(max(weighting_base) - min(weighting_base)))**2"

    packages = {
        'svmRadial': ['kernlab'],
        'svmLinear': ['kernlab'],
        'svmPoly': ['kernlab'],
        'rf': ['randomForest'],
        'rpart': ['rpart'],
        'C5.0': ['C50'],
        'xgbTree': ['xgboost', 'plyr']
    }

    install_package = "if(!is.element('%s', installed.packages()[,1])){\n"
    install_package += "cat('\\n\\nInstalling %s package from CRAN\n')\n"
    install_package += "if(!file.exists(Sys.getenv('R_LIBS_USER'))){\n"
    install_package += "dir.create(Sys.getenv('R_LIBS_USER'), recursive=TRUE)\n"
    install_package += ".libPaths(Sys.getenv('R_LIBS_USER'))}\n"
    install_package += "chooseCRANmirror(ind=1)\n"
    install_package += "install.packages('%s', dependencies=TRUE)}"

    if options['segments_map']:
        allfeatures = options['segments_map']
        segments_layer = options['segments_layer']
        allmap = True
    else:
        allfeatures = options['segments_file']
        allmap = False

    if options['training_map']:
        training = options['training_map']
        training_layer = options['training_layer']
        trainmap = True
    else:
        training = options['training_file']
        trainmap = False

    classcol = options['train_class_column']
    output_classcol = options['output_class_column']
    output_probcol = None
    if options['output_prob_column']:
        output_probcol = options['output_prob_column']
    classifiers = options['classifiers'].split(',')
    weighting_modes = options['weighting_modes'].split(',')
    weighting_metric = options['weighting_metric']
    processes = int(options['processes'])
    folds = options['folds']
    partitions = options['partitions']
    tunelength = options['tunelength']
    separator = gscript.separator(options['separator'])
    tunegrids = literal_eval(
        options['tunegrids']) if options['tunegrids'] else {}

    classification_results = None
    if options['classification_results']:
        classification_results = options['classification_results'].replace(
            "\\", "/")

    model_details = None
    if options['model_details']:
        model_details = options['model_details'].replace("\\", "/")

    raster_segments_map = None
    if options['raster_segments_map']:
        raster_segments_map = options['raster_segments_map']

    classified_map = None
    if options['classified_map']:
        classified_map = options['classified_map']

    r_script_file = None
    if options['r_script_file']:
        r_script_file = options['r_script_file']

    accuracy_file = None
    if options['accuracy_file']:
        accuracy_file = options['accuracy_file'].replace("\\", "/")

    bw_plot_file = None
    if options['bw_plot_file']:
        bw_plot_file = options['bw_plot_file'].replace("\\", "/")

    if allmap:
        feature_vars = gscript.tempfile().replace("\\", "/")
        gscript.run_command('v.db.select',
                            map_=allfeatures,
                            file_=feature_vars,
                            layer=segments_layer,
                            quiet=True,
                            overwrite=True)
    else:
        feature_vars = allfeatures.replace("\\", "/")

    if trainmap:
        training_vars = gscript.tempfile().replace("\\", "/")
        gscript.run_command('v.db.select',
                            map_=training,
                            file_=training_vars,
                            layer=training_layer,
                            quiet=True,
                            overwrite=True)
    else:
        training_vars = training.replace("\\", "/")

    r_commands = gscript.tempfile().replace("\\", "/")

    r_file = open(r_commands, 'w')

    if processes > 1:
        install = install_package % ('doParallel', 'doParallel', 'doParallel')
        r_file.write(install)
        r_file.write("\n")

    # automatic installation of missing R packages
    install = install_package % ('caret', 'caret', 'caret')
    r_file.write(install)
    r_file.write("\n")
    install = install_package % ('e1071', 'e1071', 'e1071')
    r_file.write(install)
    r_file.write("\n")
    for classifier in classifiers:
        # knn is included in caret
        if classifier == "knn" or classifier == "knn1":
            continue
        for package in packages[classifier]:
            install = install_package % (package, package, package)
            r_file.write(install)
            r_file.write("\n")
    r_file.write("\n")
    r_file.write('require(caret)')
    r_file.write("\n")
    r_file.write(
        'features <- read.csv("%s", sep="%s", header=TRUE, row.names=1)' %
        (feature_vars, separator))
    r_file.write("\n")
    r_file.write(
        'training <- read.csv("%s", sep="%s", header=TRUE, row.names=1)' %
        (training_vars, separator))
    r_file.write("\n")
    r_file.write("training$%s <- as.factor(training$%s)" %
                 (classcol, classcol))
    r_file.write("\n")
    if processes > 1:
        r_file.write("library(doParallel)")
        r_file.write("\n")
        r_file.write("registerDoParallel(cores = %d)" % processes)
        r_file.write("\n")
    r_file.write(
        "MyFolds.cv <- createMultiFolds(training$%s, k=%s, times=%s)" %
        (classcol, folds, partitions))
    r_file.write("\n")
    r_file.write(
        "MyControl.cv <- trainControl(method='repeatedCV', index=MyFolds.cv)")
    r_file.write("\n")
    r_file.write("fmla <- %s ~ ." % classcol)
    r_file.write("\n")
    r_file.write("models.cv <- list()")
    r_file.write("\n")
    for classifier in classifiers:
        if classifier == 'knn1':
            r_file.write("Grid <- expand.grid(k=1)")
            r_file.write("\n")
            r_file.write(
                "knn1Model.cv <- train(fmla, training, method='knn', trControl=MyControl.cv, tuneGrid=Grid)"
            )
            r_file.write("\n")
            r_file.write("models.cv$knn1 <- knn1Model.cv")
            r_file.write("\n")
        else:
            if classifier in tunegrids:
                r_file.write("Grid <- expand.grid(%s)" % tunegrids[classifier])
                r_file.write("\n")
                r_file.write(
                    "%sModel.cv <- train(fmla,training,method='%s', trControl=MyControl.cv, tuneGrid=Grid)"
                    % (classifier, classifier))
            else:
                r_file.write(
                    "%sModel.cv <- train(fmla,training,method='%s', trControl=MyControl.cv, tuneLength=%s)"
                    % (classifier, classifier, tunelength))
            r_file.write("\n")
            r_file.write("models.cv$%s <- %sModel.cv" %
                         (classifier, classifier))
            r_file.write("\n")

    r_file.write("if (length(models.cv)>1) {")
    r_file.write("\n")
    r_file.write("resamps.cv <- resamples(models.cv)")
    r_file.write("\n")
    r_file.write(
        "accuracy_means <- as.vector(apply(resamps.cv$values[seq(2,length(resamps.cv$values), by=2)], 2, mean))"
    )
    r_file.write("\n")
    r_file.write(
        "kappa_means <- as.vector(apply(resamps.cv$values[seq(3,length(resamps.cv$values), by=2)], 2, mean))"
    )
    r_file.write("\n")
    r_file.write("} else {")
    r_file.write("\n")
    r_file.write("resamps.cv <- models.cv[[1]]$resample")
    r_file.write("\n")
    r_file.write("accuracy_means <- mean(resamps.cv$Accuracy)")
    r_file.write("\n")
    r_file.write("kappa_means <- mean(resamps.cv$Kappa)")
    r_file.write("\n")
    r_file.write("}")
    r_file.write("\n")
    r_file.write("predicted <- data.frame(predict(models.cv, features))")
    r_file.write("\n")
    if flags['i']:
        r_file.write(
            "resultsdf <- data.frame(id=rownames(features), predicted)")
    else:
        r_file.write("resultsdf <- data.frame(id=rownames(features))")
    r_file.write("\n")
    r_file.write(voting_function)
    r_file.write("\n")

    if weighting_metric == 'kappa':
        r_file.write("weighting_base <- kappa_means")
    else:
        r_file.write("weighting_base <- accuracy_means")
    r_file.write("\n")
    for weighting_mode in weighting_modes:
        r_file.write(weighting_functions[weighting_mode])
        r_file.write("\n")
        r_file.write("weights <- weights / sum(weights)")
        r_file.write("\n")
        r_file.write("vote <- apply(predicted, 1, voting, w=weights)")
        r_file.write("\n")
        r_file.write(
            "vote <- as.data.frame(matrix(unlist(vote), ncol=2, byrow=TRUE))")
        r_file.write("\n")
        r_file.write("resultsdf$%s_%s <- vote$V1" %
                     (output_classcol, weighting_mode))
        r_file.write("\n")
        if len(classifiers) > 1:
            r_file.write("resultsdf$%s_%s <- vote$V2" %
                         (output_probcol, weighting_mode))
            r_file.write("\n")

    if allmap and not flags['f']:
        model_output = gscript.tempfile().replace("\\", "/")
        model_output_csv = model_output + '.csv'
        write_string = "write.csv(resultsdf, '%s'," % model_output_csv
        write_string += " row.names=FALSE, quote=FALSE)"
        r_file.write(write_string)
        r_file.write("\n")
    if classified_map:
        reclass_files = {}
        if flags['i']:
            for classifier in classifiers:
                tmpfilename = gscript.tempfile()
                reclass_files[classifier] = tmpfilename.replace("\\", "/")
                r_file.write(
                    "tempdf <- data.frame(resultsdf$id, resultsdf$%s)" %
                    (classifier))
                r_file.write("\n")
                r_file.write(
                    "reclass <- data.frame(out=apply(tempdf, 1, function(x) paste(x[1],'=', x[2])))"
                )
                r_file.write("\n")
                r_file.write(
                    "write.table(reclass$out, '%s', col.names=FALSE, row.names=FALSE, quote=FALSE)"
                    % reclass_files[classifier])
                r_file.write("\n")
        for weighting_mode in weighting_modes:
            tmpfilename = gscript.tempfile()
            reclass_files[weighting_mode] = tmpfilename.replace("\\", "/")
            r_file.write(
                "tempdf <- data.frame(resultsdf$id, resultsdf$%s_%s)" %
                (output_classcol, weighting_mode))
            r_file.write("\n")
            r_file.write(
                "reclass <- data.frame(out=apply(tempdf, 1, function(x) paste(x[1],'=', x[2])))"
            )
            r_file.write("\n")
            r_file.write(
                "write.table(reclass$out, '%s', col.names=FALSE, row.names=FALSE, quote=FALSE)"
                % reclass_files[weighting_mode])
            r_file.write("\n")

    if classification_results:
        r_file.write(
            "write.csv(resultsdf, '%s', row.names=FALSE, quote=FALSE)" %
            classification_results)
        r_file.write("\n")
    if accuracy_file:
        r_file.write(
            "df_means <- data.frame(method=names(models.cv),accuracy=accuracy_means, kappa=kappa_means)"
        )
        r_file.write("\n")
        r_file.write(
            "write.csv(df_means, '%s', row.names=FALSE, quote=FALSE)" %
            accuracy_file)
        r_file.write("\n")
    if model_details:
        r_file.write("sink('%s')" % model_details)
        r_file.write("\n")
        r_file.write("cat('BEST TUNING VALUES\n')")
        r_file.write("\n")
        r_file.write("cat('******************************\n\n')")
        r_file.write("\n")
        r_file.write("lapply(models.cv, function(x) x$best)")
        r_file.write("\n")
        r_file.write("cat('\n')")
        r_file.write("\n")
        r_file.write("cat('\nSUMMARY OF RESAMPLING RESULTS\n')")
        r_file.write("\n")
        r_file.write("cat('******************************\n\n')")
        r_file.write("\n")
        r_file.write("summary(resamps.cv)")
        r_file.write("\n")
        r_file.write("cat('\n')")
        r_file.write("\n")
        r_file.write("cat('\nRESAMPLED CONFUSION MATRICES\n')")
        r_file.write("\n")
        r_file.write("cat('******************************\n\n')")
        r_file.write("\n")
        r_file.write(
            "conf.mat.cv <- lapply(models.cv, function(x) confusionMatrix(x))")
        r_file.write("\n")
        r_file.write("print(conf.mat.cv)")
        r_file.write("\n")
        r_file.write("cat('\nDETAILED CV RESULTS\n')")
        r_file.write("\n")
        r_file.write("cat('******************************\n\n')")
        r_file.write("\n")
        r_file.write("lapply(models.cv, function(x) x$results)")
        r_file.write("\n")
        r_file.write("sink()")
        r_file.write("\n")

    if bw_plot_file and len(classifiers) > 1:
        r_file.write("png('%s.png')" % bw_plot_file)
        r_file.write("\n")
        r_file.write("print(bwplot(resamps.cv))")
        r_file.write("\n")
        r_file.write("dev.off()")
    r_file.close()

    if r_script_file:
        shutil.copy(r_commands, r_script_file)

    gscript.message("Running R now. Following output is R output.")
    try:
        subprocess.check_call(
            ['Rscript', r_commands],
            stderr=subprocess.STDOUT,
        )
    except subprocess.CalledProcessError:
        gscript.fatal(
            "There was an error in the execution of the R script.\nPlease check the R output."
        )

    gscript.message("Finished running R.")

    if allmap and not flags['f']:

        model_output_csvt = model_output + '.csvt'
        temptable = 'classif_tmp_table_%d' % os.getpid()

        f = open(model_output_csvt, 'w')
        header_string = '"Integer"'
        if flags['i']:
            for classifier in classifiers:
                header_string += ',"Integer"'
        if len(classifiers) > 1:
            for weighting_mode in weighting_modes:
                header_string += ',"Integer"'
                header_string += ',"Real"'
        else:
            header_string += ',"Integer"'

        f.write(header_string)
        f.close()

        gscript.message("Loading results into attribute table")
        gscript.run_command('db.in.ogr',
                            input_=model_output_csv,
                            output=temptable,
                            overwrite=True,
                            quiet=True)
        index_creation = "CREATE INDEX idx_%s_cat" % temptable
        index_creation += " ON %s (id)" % temptable
        gscript.run_command('db.execute', sql=index_creation, quiet=True)
        columns = gscript.read_command('db.columns',
                                       table=temptable).splitlines()[1:]
        orig_cat = gscript.vector_db(allfeatures)[int(segments_layer)]['key']
        gscript.run_command('v.db.join',
                            map_=allfeatures,
                            column=orig_cat,
                            otable=temptable,
                            ocolumn='id',
                            subset_columns=columns,
                            quiet=True)

    if classified_map:
        for classification, reclass_file in reclass_files.iteritems():
            output_map = classified_map + '_' + classification
            gscript.run_command('r.reclass',
                                input=raster_segments_map,
                                output=output_map,
                                rules=reclass_file,
                                quiet=True)
示例#44
0
def main():
    global tmp_graph, tmp_group, tmp_psmap, tmp_psleg, tmp_gisleg

    breakpoints = options['breakpoints']
    colorscheme = options['colorscheme']
    column = options['column']
    endcolor = options['endcolor']
    group = options['group']
    layer = options['layer']
    linecolor = options['linecolor']
    map = options['map']
    maxsize = options['maxsize']
    monitor = options['monitor']
    nint = options['nint']
    pointcolor = options['pointcolor']
    psmap = options['psmap']
    size = options['size']
    startcolor = options['startcolor']
    themecalc = options['themecalc']
    themetype = options['themetype']
    type = options['type']
    where = options['where']
    icon = options['icon']

    flag_f = flags['f']
    flag_g = flags['g']
    flag_l = flags['l']
    flag_m = flags['m']
    flag_s = flags['s']
    flag_u = flags['u']

    layer = int(layer)
    nint = int(nint)
    size = float(size)
    maxsize = float(maxsize)

    # check column type
    inf = grass.vector_columns(map, layer)
    if column not in inf:
        grass.fatal(_("No such column <%s>") % column)
    coltype = inf[column]['type'].lower()

    if coltype not in ["integer", "double precision"]:
        grass.fatal(
            _("Column <%s> is of type <%s> which is not numeric.") %
            (column, coltype))

    # create temporary file to hold d.graph commands for legend
    tmp_graph = grass.tempfile()
    # Create temporary file to commands for GIS Manager group
    tmp_group = grass.tempfile()
    # Create temporary file for commands for ps.map map file
    tmp_psmap = grass.tempfile()
    # Create temporary file for commands for ps.map legend file
    tmp_psleg = grass.tempfile()
    # create file to hold elements for GIS Manager legend
    tmp_gisleg = grass.tempfile()

    # Set display variables for group
    atype = int(type == "area")
    ptype = int(type == "point")
    ctype = int(type == "centroid")
    ltype = int(type == "line")
    btype = int(type == "boundary")

    # if running in the GUI, do not create a graphic legend in an xmon
    if flag_s:
        flag_l = False
        # if running in GUI, turn off immediate mode rendering so that the
        # iterated d.vect commands will composite using the display driver
        os.environ['GRASS_PNG_READ'] = 'TRUE'
        os.environ['GRASS_PNG_AUTO_WRITE'] = 'FALSE'

    db = grass.vector_db(map)[1]
    if not db or not db['table']:
        grass.fatal(
            _("No table connected or layer <%s> does not exist.") % layer)
    table = db['table']
    database = db['database']
    driver = db['driver']

    # update color values to the table?
    if flag_u:
        # test, if the column GRASSRGB is in the table
        s = grass.read_command('db.columns',
                               table=table,
                               database=database,
                               driver=driver)
        if 'grassrgb' not in s.splitlines():
            msg(locals(), _("Creating column 'grassrgb' in table <$table>"))
            sql = "ALTER TABLE %s ADD COLUMN grassrgb varchar(11)" % table
            grass.write_command('db.execute',
                                database=database,
                                driver=driver,
                                stdin=sql)

    # Group name
    if not group:
        group = "themes"

    f_group = file(tmp_group, 'w')
    f_group.write("Group %s\n" % group)

    # Calculate statistics for thematic intervals
    if type == "line":
        stype = "line"
    else:
        stype = ["point", "centroid"]

    if not where:
        where = None

    stats = grass.read_command('v.univar',
                               flags='eg',
                               map=map,
                               type=stype,
                               column=column,
                               where=where,
                               layer=layer)
    stats = grass.parse_key_val(stats)

    min = float(stats['min'])
    max = float(stats['max'])
    mean = float(stats['mean'])
    sd = float(stats['population_stddev'])
    q1 = float(stats['first_quartile'])
    q2 = float(stats['median'])
    q3 = float(stats['third_quartile'])
    q4 = max

    ptsize = size

    if breakpoints and themecalc != "custom_breaks":
        grass.warning(
            _("Custom breakpoints ignored due to themecalc setting."))

    # set interval for each thematic map calculation type
    if themecalc == "interval":
        numint = nint
        step = float(max - min) / numint
        breakpoints = [min + i * step for i in xrange(numint + 1)]
        annotations = ""
    elif themecalc == "std_deviation":
        # 2 standard deviation units on either side of mean,
        # plus min to -2 sd units and +2 sd units to max, if applicable
        breakpoints = [min] + [
            i for i in [(mean + i * sd)
                        for i in [-2, -1, 0, 1, 2]] if min < i < max
        ] + [max]
        annotations = [""] + [("%dsd" % i)
                              for (i, j) in [(i, mean + i * sd)
                                             for i in [-2, -1, 0, 1, 2]]
                              if (min < j < max)] + [""]
        annotations = ";".join(annotations)
        numint = len(breakpoints) - 1
    elif themecalc == "quartiles":
        numint = 4
        # one for each quartile
        breakpoints = [min, q1, q2, q3, max]
        annotations = " %f; %f; %f; %f" % (q1, q2, q3, q4)
    elif themecalc == "custom_breaks":
        if not breakpoints:
            breakpoints = sys.stdin.read()
        breakpoints = [int(x) for x in breakpoints.split()]
        numint = len(breakpoints) - 1
        annotations = ""
    else:
        grass.fatal(_("Unknown themecalc type <%s>") % themecalc)

    pointstep = (maxsize - ptsize) / (numint - 1)

    # Prepare legend cuts for too large numint
    if numint > max_leg_items:
        xupper = int(numint - max_leg_items / 2) + 1
        xlower = int(max_leg_items / 2) + 1
    else:
        xupper = 0
        xlower = 0

    # legend title
    f_graph = file(tmp_graph, 'w')
    out(
        f_graph, locals(), """\
color 0:0:0
size 2 2
move 1 95
text Thematic map legend for column $column of map $map
size 1.5 1.8
move 4 90
text Value range: $min - $max
""")

    f_gisleg = file(tmp_gisleg, 'w')
    out(
        f_gisleg, locals(), """\
title - - - {Thematic map legend for column $column of map $map}
""")

    f_psleg = file(tmp_psleg, 'w')
    out(
        f_psleg, locals(), """\
text 1% 95% Thematic map legend for column $column of map $map
  ref bottom left
end
text 4% 90% Value range: $min - $max
  ref bottom left
end
""")

    msg(locals(), _("Thematic map legend for column $column of map $map"))
    msg(locals(), _("Value range: $min - $max"))

    colorschemes = {
        "blue-red": ("0:0:255", "255:0:0"),
        "red-blue": ("255:0:0", "0:0:255"),
        "green-red": ("0:255:0", "255:0:0"),
        "red-green": ("255:0:0", "0:255:0"),
        "blue-green": ("0:0:255", "0:255:0"),
        "green-blue": ("0:255:0", "0:0:255"),
        "cyan-yellow": ("0:255:255", "255:255:0"),
        "yellow-cyan": ("255:255:0", "0:255:255"),
        "custom_gradient": (startcolor, endcolor)
    }

    # open file for psmap instructions
    f_psmap = file(tmp_psmap, 'w')

    # graduated color thematic mapping
    if themetype == "graduated_colors":
        if colorscheme in colorschemes:
            startc, endc = colorschemes[colorscheme]
        # set color schemes for graduated color maps
        elif colorscheme == "single_color":
            if themetype == "graduated_points":
                startc = endc = linecolor
            else:
                startc = endc = pointcolor
        else:
            grass.fatal(
                _("This should not happen: parser error. Unknown color scheme %s"
                  ) % colorscheme)

        color = __builtins__.map(int, startc.split(":"))
        endcolor = __builtins__.map(int, endc.split(":"))

        #The number of color steps is one less then the number of classes
        nclrstep = numint - 1
        clrstep = [(a - b) / nclrstep for a, b in zip(color, endcolor)]

        themecolor = startc

        # display graduated color themes
        if themecalc == "interval":
            out(f_graph, locals(), """\
move 4 87
text Mapped by $numint intervals of $step
""")

            out(
                f_gisleg, locals(), """\
subtitle - - - {Mapped by $numint intervals of $step}
""")

            out(
                f_psleg, locals(), """\
text 4% 87% Mapped by $numint intervals of $step
  ref bottom left
end
""")

            msg(locals(), _("Mapped by $numint intervals of $step"))

        # display graduated color themes for standard deviation units
        if themecalc == "std_deviation":
            out(
                f_graph, locals(), """\
move 4 87
text Mapped by standard deviation units of $sd (mean = $mean)
""")

            out(
                f_gisleg, locals(), """\
subtitle - - - {Mapped by standard deviation units of $sd (mean = $mean)}
""")

            out(
                f_psleg, locals(), """\
text 4% 87% Mapped by standard deviation units of $sd (mean = $mean)
  ref bottom left
end
""")

            msg(locals(),
                _("Mapped by standard deviation units of $sd (mean = $mean)"))

        # display graduated color themes for quartiles
        if themecalc == "quartiles":
            out(f_graph, locals(), """\
move 4 87
text Mapped by quartiles (median = $q2)
""")

            out(f_gisleg, locals(), """\
subtitle - - - {Mapped by quartiles (median = $q2)}
""")

            out(
                f_psleg, locals(), """\
text 4% 87% Mapped by quartiles (median = $q2)
  ref bottom left
end
""")

            msg(locals(), _("Mapped by quartiles (median = $q2)"))

        f_graph.write("""\
move 4 83
text Color
move 14 83
text Value
move 4 80
text =====
move 14 80
text ============
""")

        f_psleg.write("""\
text 4% 83% Color
  ref bottom left
end
text 14% 83% Value
  ref bottom left
end
text 4% 80% =====
  ref bottom left
end
text 14% 80% ============
  ref bottom left
end
""")

        sys.stdout.write("Color(R:G:B)\tValue\n")
        sys.stdout.write("============\t==========\n")

        line1 = 78
        line2 = 76
        line3 = 75

        i = 1
        first = True

        while i < numint:
            if flag_m:
                # math notation
                if first:
                    closebracket = "]"
                    openbracket = "["
                    mincomparison = ">="
                    first = False
                else:
                    closebracket = "]"
                    openbracket = "]"
                    mincomparison = ">"
            else:
                closebracket = ""
                openbracket = ""
                if first:
                    mincomparison = ">="
                    first = False
                else:
                    mincomparison = ">"

            themecolor = ":".join(__builtins__.map(str, color))
            if flag_f:
                linecolor = "none"
            else:
                if type in ["line", "boundary"]:
                    linecolor = themecolor
                else:
                    linecolor = linecolor

            rangemin = __builtins__.min(breakpoints)
            rangemax = __builtins__.max(breakpoints)

            if not annotations:
                extranote = ""
            else:
                extranote = annotations[i]

            if i < xlower or i >= xupper:
                xline1 = line2 + 2
                xline3 = line2 - 1
                out(
                    f_graph, locals(), """\
color $themecolor
polygon
5 $xline1
8 $xline1
8 $xline3
5 $xline3
color $linecolor
move 5 $xline1
draw 8 $xline1
draw 8 $xline3
draw 5 $xline3
draw 5 $xline1
move 14 $line2
color 0:0:0
text $openbracket$rangemin - $rangemax$closebracket $extranote
""")
            else:
                if i == xlower:
                    out(f_graph, locals(), """\
color 0:0:0
move 10 $line2
text ...
""")
                else:
                    #undo next increment
                    line2 += 4

            if i < xlower or i >= xupper:
                out(
                    f_gisleg, locals(), """\
area $themecolor $linecolor - {$openbracket$rangemin - $rangemax$closebracket $extranote}
""")

                if type in ["line", "boundary"]:
                    out(
                        f_psleg, locals(), """\
line 5% $xline1% 8% $xline1%
  color $linecolor
end
text 14% $xline1% $openbracket$rangemin - $rangemax$closebracket $extranote
  ref center left
end
""")
                elif type in ["point", "centroid"]:
                    out(
                        f_psleg, locals(), """\
point 8% $xline1%
  color $linecolor
  fcolor $themecolor
  size $size
  symbol $icon
end
text 14% $xline1% $openbracket$rangemin - $rangemax$closebracket $extranote
  ref center left
end
""")
                else:
                    out(
                        f_psleg, locals(), """\
rectangle 5% $xline1% 8% $xline3%
  color 0:0:0
  fcolor $themecolor
end
text 14% $xline3% $openbracket$rangemin - $rangemax$closebracket DCADCA $extranote
  ref bottom left
end
""")
            else:
                if i == xlower:
                    out(
                        f_psleg, locals(), """\
color 0:0:0
text 14% $xline3% ...
  ref bottom left
end
""")

                f_gisleg.write("text - - - {...}\n")

            sys.stdout.write(
                subs(
                    locals(),
                    "$themecolor\t\t$openbracket$rangemin - $rangemax$closebracket $extranote\n"
                ))
            if not where:
                sqlwhere = subs(
                    locals(),
                    "$column $mincomparison $rangemin AND $column <= $rangemax"
                )
            else:
                sqlwhere = subs(
                    locals(),
                    "$column $mincomparison $rangemin AND $column <= $rangemax AND $where"
                )

            # update color to database?
            if flag_u:
                sql = subs(
                    locals(),
                    "UPDATE $table SET GRASSRGB = '$themecolor' WHERE $sqlwhere"
                )
                grass.write_command('db.execute',
                                    database=database,
                                    driver=driver,
                                    stdin=sql)

            # Create group for GIS Manager
            if flag_g:
                # change rgb colors to hex
                xthemecolor = "#%02X%02X%02X" % tuple(
                    __builtins__.map(int, themecolor.split(":")))
                #xlinecolor=`echo $linecolor | awk -F: '{printf("#%02X%02X%02X\n",$1,$2,$3)}'`

                if "$linecolor" == "black":
                    xlinecolor = "#000000"
                else:
                    xlinecolor = xthemecolor

                # create group entry
                out(
                    f_group, locals(), """\
  _check 1
  Vector $column = $rangemin - $rangemax
    _check 1
    map $map
    display_shape 1
    display_cat 0
    display_topo 0
    display_dir 0
    display_attr 0
    type_point $ptype
    type_line $ltype
    type_boundary $btype
    type_centroid $ctype
    type_area $atype
    type_face 0
    color $xlinecolor
    fcolor $xthemecolor
    width $ptsize
    _use_fcolor 1
    lcolor #000000
    sqlcolor 0
    icon $icon
    size $ptsize
    field $layer
    lfield $layer
    attribute
    xref left
    yref center
    lsize 8
    cat
    where $sqlwhere
    _query_text 0
    _query_edit 1
    _use_where 1
    minreg
    maxreg
    _width 0.1
  End
""")

            # display theme vector map

            grass.run_command('d.vect',
                              map=map,
                              type=type,
                              layer=layer,
                              where=sqlwhere,
                              color=linecolor,
                              fcolor=themecolor,
                              icon=icon,
                              size=ptsize)

            if type in ["line", "boundary"]:
                out(
                    f_psmap, locals(), """\
vlines $map
  type $type
  layer $layer
  where $sqlwhere
  color $linecolor
  label $rangemin - $rangemax
end
""")
            elif type in ["point", "centroid"]:
                out(
                    f_psmap, locals(), """\
vpoints $map
  type $type
  layer $layer
  where $sqlwhere
  color $linecolor
  fcolor $themecolor
  symbol $icon
  label $rangemin - $rangemax
end
""")
            else:
                out(
                    f_psmap, locals(), """\
vareas $map
  layer $layer
  where $sqlwhere
  color $linecolor
  fcolor $themecolor
  label $rangemin - $rangemax
end
""")

            # increment for next theme
            i += 1
            if i == numint:
                color = endcolor
            else:
                color = [a - b for a, b in zip(color, clrstep)]
            line1 -= 4
            line2 -= 4
            line3 -= 4

    #graduated points and line widths thematic mapping

    if themetype in ["graduated_points", "graduated_lines"]:

        #display graduated points/lines by intervals
        if themecalc == "interval":
            out(f_graph, locals(), """\
move 4 87
text Mapped by $numint intervals of $step
""")

            out(
                f_gisleg, locals(), """\
subtitle - - - {Mapped by $numint intervals of $step}
""")

            out(
                f_psleg, locals(), """\
text 4% 87% Mapped by $numint intervals of $step
  ref bottom left
end
""")

            msg(locals(), _("Mapped by $numint intervals of $step"))

        # display graduated points/lines for standard deviation units
        if themecalc == "std_deviation":

            out(
                f_graph, locals(), """\
move 4 87
text Mapped by standard deviation units of $sd (mean = $mean)
""")

            out(
                f_gisleg, locals(), """\
subtitle - - - {Mapped by standard deviation units of $sd (mean = $mean)}
""")

            out(
                f_psleg, locals(), """\
text 4% 87% Mapped by standard deviation units of $sd (mean = $mean)
  ref bottom left
end
""")

            msg(locals(),
                _("Mapped by standard deviation units of $sd (mean = $mean)"))

        # display graduated points/lines for quartiles
        if themecalc == "quartiles":

            out(f_graph, locals(), """\
move 4 87
text Mapped by quartiles (median = $q2)
""")

            out(f_gisleg, locals(), """\
subtitle - - - {Mapped by quartiles (median = $q2)}
""")

            out(
                f_psleg, locals(), """\
text 4% 87% Mapped by quartiles (median = $q2)
  ref bottom left
end
""")

            msg(locals(), _("Mapped by quartiles (median = $q2)"))

        line1 = 76
        line2 = 75

        out(
            f_graph, locals(), """\
move 4 83
text Size/width
move 25 83
text Value
move 4 80
text ==============
move 25 80
text ==============
""")

        out(
            f_psleg, locals(), """\
text 4% 83% Icon size
  ref bottom left
end
text 25% 83% Value
  ref bottom left
end
text 4% 80% ============
  ref bottom left
end
text 25% 80% ============
  ref bottom left
end
""")

        sys.stdout.write("Size/width\tValue\n")
        sys.stdout.write("==========\t=====\n")

        themecolor = pointcolor

        if flag_f:
            linecolor = "none"

        i = numint
        ptsize = maxsize

        while i >= 1:
            if flag_m:
                # math notation
                if i == 1:
                    closebracket = "]"
                    openbracket = "["
                    mincomparison = ">="
                else:
                    closebracket = "]"
                    openbracket = "]"
                    mincomparison = ">"
            else:
                closebracket = ""
                openbracket = ""
                if i == 1:
                    mincomparison = ">="
                else:
                    mincomparison = ">"

            themecolor = pointcolor

            if flag_f:
                linecolor = "none"

            rangemin = __builtins__.min(breakpoints)
            rangemax = __builtins__.max(breakpoints)

            if not annotations:
                extranote = ""
            else:
                extranote = annotations[i]

            iconsize = int(ptsize / 2)
            lineht = int(ptsize / 4)
            if lineht < 4:
                lineht = 4

            if i < xlower or i >= xupper:
                if themetype == "graduated_lines":
                    out(f_graph, locals(), """\
color $linecolor
""")

                    out(
                        f_gisleg, locals(), """\
line $themecolor $linecolor $ptsize {$openbracket$rangemin - $rangemax$closebracket $extranote}
""")
                else:
                    out(f_graph, locals(), """\
color $themecolor
""")
                    out(
                        f_gisleg, locals(), """\
point $themecolor $linecolor $ptsize {$openbracket$rangemin - $rangemax$closebracket $extranote}
""")

                out(
                    f_graph, locals(), """\
icon + $iconsize 5 $line1
color 0:0:0
move 10 $line2
text $ptsize pts
move 25 $line2
text $openbracket$rangemin - $rangemax$closebracket $extranote
""")
            else:
                if i == xlower:
                    out(f_graph, locals(), """\
color 0:0:0
move 10 $line2
text ...
""")

                    out(f_gisleg, locals(), """\
text - - - ...
""")
                else:
                    # undo future line increment
                    line2 += lineht

            if i < xlower or i >= xupper:
                out(
                    f_psleg, locals(), """\
point 8% $line1%
  color $linecolor
  fcolor $themecolor
  size $iconsize
  symbol $icon
end
text 25% $line1% $openbracket$rangemin - $rangemax$closebracket $extranote
  ref center left
end
""")
            else:
                if i == xlower:
                    out(f_psleg, locals(), """\
text 25% $xline1% ...
   ref center left
end
""")

            sys.stdout.write(
                subs(
                    locals(),
                    "$ptsize\t\t$openbracket$rangemin - $rangemax$closebracket $extranote\n"
                ))

            if not where:
                sqlwhere = subs(
                    locals(),
                    "$column $mincomparison $rangemin AND $column <= $rangemax"
                )
            else:
                sqlwhere = subs(
                    locals(),
                    "$column $mincomparison $rangemin AND $column <= $rangemax AND $where"
                )

            # update color to database?
            if flag_u:
                sql = subs(
                    locals(),
                    "UPDATE $table SET grassrgb = '$themecolor' WHERE $sqlwhere"
                )
                grass.write_command('db.execute',
                                    database=database,
                                    driver=driver,
                                    stdin=sql)

            # Create group for GIS Manager
            if flag_g:
                # change rgb colors to hex
                xthemecolor = "#%02X%02X%02X" % tuple(
                    __builtins__.map(int, themecolor.split(":")))
                xlinecolor = "#000000"

                # create group entry
                out(
                    f_group, locals(), """\
  _check 1
  Vector $column = $rangemin - $rangemax
    _check 1
    map $map
    display_shape 1
    display_cat 0
    display_topo 0
    display_dir 0
    display_attr 0
    type_point $ptype
    type_line $ltype
    type_boundary $btype
    type_centroid $ctype
    type_area $atype
    type_face 0
    color $xlinecolor
    width $ptsize
    fcolor $xthemecolor
    _use_fcolor 1
    lcolor #000000
    sqlcolor 0
    icon $icon
    size $ptsize
    field $layer
    lfield $layer
    attribute
    xref left
    yref center
    lsize 8
    cat
    where $sqlwhere
    _query_text 0
    _query_edit 1
    _use_where 1
    minreg
    maxreg
    _width 0.1
  End
""")

            #graduates line widths or point sizes

            if themetype == "graduated_lines":
                grass.run_command('d.vect',
                                  map=map,
                                  type=type,
                                  layer=layer,
                                  where=sqlwhere,
                                  color=linecolor,
                                  fcolor=themecolor,
                                  icon=icon,
                                  size=ptsize,
                                  width=ptsize)

            else:
                grass.run_command('d.vect',
                                  map=map,
                                  type=type,
                                  layer=layer,
                                  where=sqlwhere,
                                  color=linecolor,
                                  fcolor=themecolor,
                                  icon=icon,
                                  size=ptsize)

                out(
                    f_psmap, locals(), """\
vpoints $map
  type $type
  layer $layer
  where $sqlwhere
  color $linecolor
  fcolor $themecolor
  symbol $icon
  size $ptsize
  label $rangemin - $rangemax
end
""")

            ptsize -= pointstep

            line1 -= lineht
            line2 -= lineht
            i -= 1

    # Create graphic legend
    f_graph.close()
    if flag_l:
        grass.run_command('d.erase')
        grass.run_command('d.graph', input=tmp_graph)

    # Create group file for GIS Manager
    f_group.write("End\n")
    f_group.close()
    if flag_g:
        shutil.copyfile(tmp_group, "%s.dm" % group)

    # Create ps.map map file
    f_psmap.write("end\n")
    f_psmap.close()
    if psmap:
        shutil.copyfile(tmp_psmap, "%s.psmap" % psmap)

    # Create ps.map legend file
    f_psleg.write("end\n")
    f_psleg.close()
    if psmap:
        shutil.copyfile(tmp_psleg, "%s_legend.psmap" % psmap)

    # Create text file to use with d.graph in GIS Manager
    f_gisleg.close()
    if flag_s:
        tmpdir = os.path.dirname(tmp_gisleg)
        tlegfile = os.path.join(tmpdir, "gismlegend.txt")
        shutil.copyfile(tmp_gisleg, tlegfile)
示例#45
0
def main():
    color  = options['color']
    column = options['column']
    layer  = options['layer']
    map    = options['map']
    range  = options['range']
    raster = options['raster']
    rgb_column = options['rgb_column']
    rules  = options['rules']
    flip   = flags['n']
    
    global tmp, tmp_colr, tmp_vcol
    pid = os.getpid()
    tmp = tmp_colr = tmp_vcol = None
    
    mapset = grass.gisenv()['MAPSET']
    gisbase = os.getenv('GISBASE')
    
    # does map exist in CURRENT mapset?
    kv = grass.find_file(map, element = 'vector', mapset = mapset)
    if not kv['file']:
        grass.fatal(_("Vector map <%s> not found in current mapset") % map)
    
    vector = map.split('@', 1)
    
    # sanity check mutually exclusive color options
    if not options['color'] and not options['raster'] and not options['rules']:
        grass.fatal(_("Pick one of color, rules, or raster options"))
    
    if color:
        #### check the color rule is valid
        color_opts = os.listdir(os.path.join(gisbase, 'etc', 'colors'))
        color_opts += ['random', 'grey.eq', 'grey.log', 'rules']
        if color not in color_opts:
            grass.fatal(_("Invalid color rule <%s>\n") % color +
                        _("Valid options are: %s") % ' '.join(color_opts))
    elif raster:
        if not grass.find_file(raster)['name']:
            grass.fatal(_("Raster raster map <%s> not found") % raster)
    elif rules:
        if not os.access(rules, os.R_OK):
            grass.fatal(_("Unable to read color rules file <%s>") % rules)
    
    # column checks
    # check input data column
    cols = grass.vector_columns(map, layer = layer)
    if column not in cols:
        grass.fatal(_("Column <%s> not found") % column)
    ncolumn_type = cols[column]['type']
    if ncolumn_type not in ["INTEGER", "DOUBLE PRECISION"]:
        grass.fatal(_("Column <%s> is not numeric but %s") % (column, ncolumn_type))
    
    # check if GRASSRGB column exists, make it if it doesn't
    table = grass.vector_db(map)[int(layer)]['table']
    if rgb_column not in cols:
        # RGB Column not found, create it
        grass.message(_("Creating column <%s>...") % rgb_column)
        try:
            grass.run_command('v.db.addcolumn', map = map, layer = layer, column = "%s varchar(11)" % rgb_column)
        except CalledModuleError:
            grass.fatal(_("Creating color column"))
    else:
        column_type = cols[rgb_column]['type']
        if column_type not in ["CHARACTER", "TEXT"]:
            grass.fatal(_("Column <%s> is not of compatible type (found %s)") % (rgb_column, column_type))
        else:
            num_chars = dict([(v[0], int(v[2])) for v in grass.db_describe(table)['cols']])[rgb_column]
            if num_chars < 11:
                grass.fatal(_("Color column <%s> is not wide enough (needs 11 characters)"), rgb_column)
    
    cvals = grass.vector_db_select(map, layer = int(layer), columns = column)['values'].values()
    
    # find data range
    if range:
        # order doesn't matter
        vals = range.split(',')
    else:
        grass.message(_("Scanning values..."))
        vals = [float(x[0]) for x in cvals]
    
    minval = min(vals)
    maxval = max(vals)

    grass.verbose(_("Range: [%s, %s]") % (minval, maxval))
    if minval is None or maxval is None:
        grass.fatal(_("Scanning data range"))

    # setup internal region
    grass.use_temp_region()
    grass.run_command('g.region', rows = 2, cols = 2)
    
    tmp_colr = "tmp_colr_%d" % pid
    
    # create dummy raster map
    if ncolumn_type == "INTEGER":
        grass.mapcalc("$tmp_colr = int(if(row() == 1, $minval, $maxval))",
                      tmp_colr = tmp_colr, minval = minval, maxval = maxval)
    else:
        grass.mapcalc("$tmp_colr = double(if(row() == 1, $minval, $maxval))",
                      tmp_colr = tmp_colr, minval = minval, maxval = maxval)
    
    if color:
        color_cmd = {'color': color}
    elif raster:
        color_cmd = {'raster': raster}
    elif rules:
        color_cmd = {'rules': rules}
    
    if flip:
        flip_flag = 'n'
    else:
        flip_flag = ''
    
    grass.run_command('r.colors', map = tmp_colr, flags = flip_flag, quiet = True, **color_cmd)
    
    tmp = grass.tempfile()
    
    # calculate colors and write SQL command file
    grass.message(_("Looking up colors..."))
    
    f = open(tmp, 'w')
    p = grass.feed_command('r.what.color', flags = 'i', input = tmp_colr, stdout = f)
    lastval = None
    for v in sorted(vals):
        if v == lastval:
            continue
        p.stdin.write('%f\n' % v)
    p.stdin.close()
    p.wait()
    f.close()
    
    tmp_vcol = "%s_vcol.sql" % tmp
    fi = open(tmp, 'r')
    fo = open(tmp_vcol, 'w')
    t = string.Template("UPDATE $table SET $rgb_column = '$colr' WHERE $column = $value;\n")
    found = 0
    for line in fi:
        [value, colr] = line.split(': ')
        colr = colr.strip()
        if len(colr.split(':')) != 3:
            continue
        fo.write(t.substitute(table = table, rgb_column = rgb_column, colr = colr, column = column, value = value))
        found += 1
    fi.close()
    fo.close()
    
    if not found:
        grass.fatal(_("No values found in color range"))
    
    # apply SQL commands to update the table with values
    grass.message(_("Writing %s colors...") % found)
    
    try:
        grass.run_command('db.execute', input = tmp_vcol)
    except CalledModuleError:
        grass.fatal(_("Processing SQL transaction"))
    
    if flags['s']:
        vcolors = "vcolors_%d" % pid
        grass.run_command('g.rename', raster = (tmp_colr, vcolors), quiet = True)
        grass.message(_("Raster map containing color rules saved to <%s>") % vcolors)
        # TODO save full v.colors command line history
        grass.run_command('r.support', map = vcolors,
                          history = "",
                          source1 = "vector map = %s" % map,
                          source2 = "column = %s" % column,
                          title = _("Dummy raster to use as thematic vector legend"),
                          description = "generated by v.colors using r.mapcalc")
        grass.run_command('r.support', map = vcolors,
                          history = _("RGB saved into <%s> using <%s%s%s>") % (rgb_column, color, raster, rules))
示例#46
0
def main():
    if flags['r'] and flags['s']:
	grass.fatal(_("Either -r or -s flag"))

    mapname = options['map']
    option = options['option']
    layer = options['layer']
    units = options['units']

    nuldev = file(os.devnull, 'w')

    if not grass.find_file(mapname, 'vector')['file']:
	grass.fatal(_("Vector map '%s' not found in mapset search path.") % mapname)

    colnames = grass.vector_columns(mapname, layer, getDict = False, stderr = nuldev)
    if not colnames:
	colnames = ['cat']

    if option == 'coor':
	columns = ['dummy1','dummy2','dummy3']
	extracolnames = ['x','y','z']
    else:
	columns = ['dummy1']
	extracolnames = [option]

    if units in ['p','percent']:
	unitsp = 'meters'
    elif units:
	unitsp = units
    else:
	unitsp = None

    # NOTE: we suppress -1 cat and 0 cat

    if colnames:
	p = grass.pipe_command('v.db.select', quiet = True, flags='c', map = mapname, layer = layer)
	records1 = []
	for line in p.stdout:
	    cols = line.rstrip('\r\n').split('|')
	    if cols[0] == '0':
		continue
	    records1.append([int(cols[0])] + cols[1:])
	p.wait()
        if p.returncode != 0:
            sys.exit(1)
        
	records1.sort()

	if len(records1) == 0:
            try:
                f = grass.vector_db(map = mapname)[int(layer)]
                grass.fatal(_("There is a table connected to input vector map '%s', but"
                              "there are no categories present in the key column '%s'. Consider using"
                              "v.to.db to correct this.") % (mapname, f['key']))
            except KeyError:
                pass

	#fetch the requested attribute sorted by cat:
	p = grass.pipe_command('v.to.db', flags = 'p',
                               quiet = True,
			       map = mapname, option = option, columns = columns,
			       layer = layer, units = unitsp)
	records2 = []
	for line in p.stdout:
	    fields = line.rstrip('\r\n').split('|')
	    if fields[0] in ['cat', '-1', '0']:
		continue
	    records2.append([int(fields[0])] + fields[1:])
	p.wait()
	records2.sort()

	#make pre-table
	records3 = [r1 + r2[1:] for r1, r2 in zip(records1, records2)]
    else:
	records1 = []
        p = grass.pipe_command('v.category', inp = mapname, layer = layer, option = 'print')
	for line in p.stdout:
	    field = int(line.rstrip())
	    if field > 0:
		records1.append(field)
	p.wait()
	records1.sort()
	records1 = uniq(records1)

        #make pre-table
	p = grass.pipe_command('v.to.db', flags = 'p',
			       map = mapname, option = option, columns = columns,
			       layer = layer, units = unitsp)
	records3 = []
	for line in p.stdout:
	    fields = line.split('|')
	    if fields[0] in ['cat', '-1', '0']:
		continue
	    records3.append([int(fields[0])] + fields[1:])
	p.wait()
	records3.sort()

    # print table header
    sys.stdout.write('|'.join(colnames + extracolnames) + '\n')

    #make and print the table:
    numcols = len(colnames) + len(extracolnames)

    # calculate percents if requested
    if units != '' and units in ['p','percent']:
	# calculate total area value
	areatot = 0
	for r in records3:
	    areatot += float(r[-1])

	# calculate area percentages
	records4 = [float(r[-1]) * 100 / areatot for r in records3]
	records3 = [r1 + [r4] for r1, r4 in zip(records1, records4)]

    if flags['s']:
	# sort
	records3.sort(key = lambda r: (r[0], r[-1]))
    elif flags['r']:
	# reverse sort
	records3.sort(key = lambda r: (r[0], r[-1]), reverse = True)

    for r in records3:
	sys.stdout.write('|'.join(map(str,r)) + '\n')
示例#47
0
def main():
    # old connection
    old_database = options['old_database']
    old_schema = options['old_schema']
    # new connection
    default_connection = grass.db_connection()
    if options['new_driver']:
        new_driver = options['new_driver']
    else:
        new_driver = default_connection['driver']
    if options['new_database']:
        new_database = options['new_database']
    else:
        new_database = default_connection['database']
    if options['new_schema']:
        new_schema = options['new_schema']
    else:
        new_schema = default_connection['schema']

    if old_database == '':
    	old_database = None
    old_database_subst = None
    if old_database is not None:
	old_database_subst = substitute_db(old_database)

    new_database_subst = substitute_db(new_database)
    
    if old_database_subst == new_database_subst and old_schema == new_schema:
	grass.fatal(_("Old and new database connection is identical. Nothing to do."))
    
    mapset = grass.gisenv()['MAPSET']
        
    vectors = grass.list_grouped('vect')[mapset]
    num_vectors = len(vectors)

    if flags['c']:
	# create new database if not existing
	create_db(new_driver, new_database)
    
    i = 0
    for vect in vectors:
        vect = "%s@%s" % (vect, mapset)
        i += 1
	grass.message(_("%s\nReconnecting vector map <%s> (%d of %d)...\n%s") % \
                          ('-' * 80, vect, i, num_vectors, '-' * 80))
        for f in grass.vector_db(vect, stderr = nuldev).itervalues():
            layer = f['layer']
            schema_table = f['table']
            key = f['key']
            database = f['database']
            driver = f['driver']
            
            # split schema.table
            if '.' in schema_table:
                schema, table = schema_table.split('.', 1)
            else:
                schema = ''
                table = schema_table
            
            if new_schema:
                new_schema_table = "%s.%s" % (new_schema, table)
            else:
                new_schema_table = table
            
            grass.debug("DATABASE = '%s' SCHEMA = '%s' TABLE = '%s' ->\n"
                        "      NEW_DATABASE = '%s' NEW_SCHEMA_TABLE = '%s'" % \
                            (old_database, schema, table, new_database, new_schema_table))

            do_reconnect = True
	    if old_database_subst is not None:
		if database != old_database_subst:
		    do_reconnect = False
	    if database == new_database_subst:
		do_reconnect = False
	    if schema != old_schema:
		do_reconnect = False
		
            if do_reconnect == True:
                grass.verbose(_("Reconnecting layer %d...") % layer)
                                          
                if flags['c']:
                    # check if table exists in new database
                    copy_tab(driver, database, schema_table,
                             new_driver, new_database, new_schema_table)
                
                # drop original table if required
                if flags['d']:
                    drop_tab(vect, layer, schema_table, driver, substitute_db(database))

                # reconnect tables (don't use substituted new_database)
		# NOTE: v.db.connect creates an index on the key column
                try:
                    grass.run_command('v.db.connect', flags = 'o', quiet = True, map = vect,
                                      layer = layer, driver = new_driver, database = new_database,
                                      table = new_schema_table, key = key)
                except CalledModuleError:
                    grass.warning(_("Unable to connect table <%s> to vector <%s> on layer <%s>") %
				  (table, vect, str(layer)))

            else:
		if database != new_database_subst:
		    grass.warning(_("Layer <%d> will not be reconnected because "
				    "database or schema do not match.") % layer)
	
    return 0
def main():
    global TMPLOC, GISDBASE
    global orgenv, switchloc

    partner_regions = options['partner_regions']
    partner_regions_layer = options['partner_regions_layer']
    partner_id_column = options['partner_id']
    grid_points = options['grid_points']
    grid_points_layer = options['grid_points_layer']
    all_partner_id_column = options['all_partner_id']

    basins = options['basins']
    basins_layer = options['basins_layer']

    output = options['output']
    output_layer = options['output_layer']
    output_format = options['format']

    orgenv = gscript.gisenv()
    GISDBASE = orgenv['GISDBASE']
    TMPLOC = 'ECMWF_temp_location_' + str(os.getpid())

    # import grid points with v.in.ogr into new location
    kwargs = dict()
    if grid_points_layer:
        kwargs['layer'] = grid_points_layer
    gscript.run_command('v.in.ogr',
                        input=grid_points,
                        output="grid_points",
                        location=TMPLOC,
                        **kwargs)
    del kwargs

    # switch to new location
    gscript.run_command('g.mapset', location=TMPLOC, mapset="PERMANENT")
    switchloc = True

    # check if we have an attribute table
    dbinfo = gscript.vector_db("grid_points")
    if 1 not in dbinfo.keys():
        # add new table
        gscript.run_command('v.db.addtable', map="grid_points")
        dbinfo = gscript.vector_db("grid_points")

    # check if the column all_partner_id_column exists
    columns = gscript.read_command('v.info', map="grid_points", flags="c")

    found = False
    for line in columns.splitlines():
        colname = line.split("|", 1)[1]
        if colname == all_partner_id_column:
            found = True

    if found is False:
        # add column
        gscript.run_command('v.db.addcolumn',
                            map="grid_points",
                            column="%s varchar(255)" % (all_partner_id_column))
    else:
        # clear column entries
        table = dbinfo[1]['table']
        database = dbinfo[1]['database']
        driver = dbinfo[1]['driver']
        sqlcmd = "UPDATE %s SET %s=NULL" % (table, all_partner_id_column)
        gscript.write_command('db.execute',
                              input='-',
                              database=database,
                              driver=driver,
                              stdin=sqlcmd)

    # import all partner polygons with v.import
    # need to snap, assume units are meters !!!

    kwargs = dict()
    if partner_regions_layer:
        kwargs['layer'] = partner_regions_layer
    gscript.run_command('v.import',
                        input=partner_regions,
                        output="partner_regions_1",
                        snap="0.01",
                        **kwargs)
    del kwargs

    # the column partner_id_column must exist
    columns = gscript.read_command('v.info',
                                   map="partner_regions_1",
                                   flags="c")

    found = False
    for line in columns.splitlines():
        colname = line.split("|", 1)[1]
        if colname == partner_id_column:
            found = True

    if found is False:
        gscript.fatal("Column <%s> not found in input <%s>" %
                      (partner_id_column, partner_regions))

    # clean partner regions
    # clean up overlapping parts and gaps smaller mingapsize
    mingapsize = 10000000
    gscript.run_command('v.clean',
                        input="partner_regions_1",
                        output="partner_regions_2",
                        tool="rmarea",
                        thresh=mingapsize,
                        flags="c")

    # import river basins with v.import
    # need to snap, assume units are meters !!!

    kwargs = dict()
    if basins_layer:
        kwargs['layer'] = basins_layer
    gscript.run_command('v.import',
                        input=basins,
                        output="basins",
                        snap="10",
                        **kwargs)
    del kwargs

    # add new column basin_cat to gird_points
    gscript.run_command('v.db.addcolumn',
                        map="grid_points",
                        column="basin_cat integer")

    # upload basin cat to grid points
    gscript.run_command('v.what.vect',
                        map="grid_points",
                        column="basin_cat",
                        query_map="basins",
                        query_column="cat")

    # combine basins and partner regions with v.overlay with snap=0.01
    gscript.run_command('v.overlay',
                        ainput="basins",
                        atype="area",
                        binput="partner_regions_2",
                        btype="area",
                        operator="and",
                        output="basins_partners",
                        olayer="1,0,0",
                        snap="0.01")

    # select all basin cats from grid points
    basincats = gscript.read_command('v.db.select',
                                     map="grid_points",
                                     column="basin_cat",
                                     where="basin_cat is not null",
                                     flags="c")

    basincatsint = [int(c) for c in basincats.splitlines()]
    basincatsint = sorted(set(basincatsint))

    # loop over basin cats
    gscript.message(
        _("Updating grid points with partner region IDs for %d basins, this can take some time.."
          ) % (len(basincatsint)))
    for bcat in basincatsint:

        # for each basin cat, select all partner ids from the overlay
        pcats = gscript.read_command('v.db.select',
                                     map="basins_partners",
                                     column="b_%s" % (partner_id_column),
                                     where="a_cat = %d" % (bcat),
                                     flags="c")

        # create comma-separated list and upload to grid points,
        # column all_partner_id_column
        if len(pcats) > 0:
            pcatlist = []
            for c in pcats.splitlines():
                # the MOU_IDS column can already contain a comma-separated list of IDs
                for cc in c.split(','):
                    pcatlist.append(int(cc))

            pcatlist = sorted(set(pcatlist))
            pcatstring = ','.join(str(c) for c in pcatlist)
            gscript.run_command('v.db.update',
                                map="grid_points",
                                column=all_partner_id_column,
                                value=pcatstring,
                                where="basin_cat = %d" % (bcat),
                                quiet=True)

    # export updated grid points
    kwargs = dict()
    if output_layer:
        kwargs['output_layer'] = output_layer
    gscript.run_command('v.out.ogr',
                        input="grid_points",
                        output=output,
                        type="point",
                        format=output_format,
                        flags="s",
                        **kwargs)
    del kwargs

    return 0
示例#49
0
def main():
    global tmp_graph, tmp_group, tmp_psmap, tmp_psleg, tmp_gisleg

    breakpoints = options['breakpoints']
    colorscheme = options['colorscheme']
    column = options['column']
    endcolor = options['endcolor']
    group = options['group']
    layer = options['layer']
    linecolor = options['linecolor']
    map = options['map']
    maxsize = options['maxsize']
    monitor = options['monitor']
    nint = options['nint']
    pointcolor = options['pointcolor']
    psmap = options['psmap']
    size = options['size']
    startcolor = options['startcolor']
    themecalc = options['themecalc']
    themetype = options['themetype']
    type = options['type']
    where = options['where']
    icon = options['icon']

    flag_f = flags['f']
    flag_g = flags['g']
    flag_l = flags['l']
    flag_m = flags['m']
    flag_s = flags['s']
    flag_u = flags['u']

    layer = int(layer)
    nint = int(nint)
    size = float(size)
    maxsize = float(maxsize)

    # check column type
    inf = grass.vector_columns(map, layer)
    if column not in inf:
	grass.fatal(_("No such column <%s>") % column)
    coltype = inf[column]['type'].lower()

    if coltype not in ["integer", "double precision"]:
	grass.fatal(_("Column <%s> is of type <%s> which is not numeric.") % (column, coltype))

    # create temporary file to hold d.graph commands for legend
    tmp_graph = grass.tempfile()
    # Create temporary file to commands for GIS Manager group
    tmp_group = grass.tempfile()
    # Create temporary file for commands for ps.map map file
    tmp_psmap = grass.tempfile()
    # Create temporary file for commands for ps.map legend file
    tmp_psleg = grass.tempfile()
    # create file to hold elements for GIS Manager legend
    tmp_gisleg = grass.tempfile()

    # Set display variables for group
    atype = int(type == "area")
    ptype = int(type == "point")
    ctype = int(type == "centroid")
    ltype = int(type == "line")
    btype = int(type == "boundary")

    # if running in the GUI, do not create a graphic legend in an xmon
    if flag_s:
        flag_l = False
        # if running in GUI, turn off immediate mode rendering so that the
        # iterated d.vect commands will composite using the display driver
        os.environ['GRASS_PNG_READ'] = 'TRUE'
        os.environ['GRASS_PNG_AUTO_WRITE'] = 'FALSE'

    db = grass.vector_db(map)[1]
    if not db or not db['table']:
        grass.fatal(_("No table connected or layer <%s> does not exist.") % layer)
    table = db['table']
    database = db['database']
    driver = db['driver']

    # update color values to the table?
    if flag_u:
        # test, if the column GRASSRGB is in the table
        s = grass.read_command('db.columns', table = table, database = database, driver = driver)
        if 'grassrgb' not in s.splitlines():
            msg(locals(), _("Creating column 'grassrgb' in table <$table>"))
            sql = "ALTER TABLE %s ADD COLUMN grassrgb varchar(11)" % table
            grass.write_command('db.execute', database = database, driver = driver, stdin = sql)

    # Group name
    if not group:
        group = "themes"

    f_group = file(tmp_group, 'w')
    f_group.write("Group %s\n" % group)

    # Calculate statistics for thematic intervals
    if type == "line":
        stype = "line"
    else:
        stype = ["point", "centroid"]

    if not where:
        where = None

    stats = grass.read_command('v.univar', flags = 'eg', map = map, type = stype, column = column, where = where, layer = layer)
    stats = grass.parse_key_val(stats)

    min  = float(stats['min'])
    max  = float(stats['max'])
    mean = float(stats['mean'])
    sd   = float(stats['population_stddev'])
    q1   = float(stats['first_quartile'])
    q2   = float(stats['median'])
    q3   = float(stats['third_quartile'])
    q4   = max

    ptsize = size

    if breakpoints and themecalc != "custom_breaks":
        grass.warning(_("Custom breakpoints ignored due to themecalc setting."))

    # set interval for each thematic map calculation type
    if themecalc == "interval":
        numint = nint
        step = float(max - min) / numint
        breakpoints = [min + i * step for i in xrange(numint + 1)]
        annotations = ""
    elif themecalc == "std_deviation":
        # 2 standard deviation units on either side of mean,
        # plus min to -2 sd units and +2 sd units to max, if applicable
        breakpoints = [min] + [i for i in [(mean + i * sd) for i in [-2,-1,0,1,2]] if min < i < max] + [max]
        annotations = [""] + [("%dsd" % i) for (i, j) in [(i, mean + i * sd) for i in [-2,-1,0,1,2]] if (min < j < max)] + [""]
        annotations = ";".join(annotations)
        numint = len(breakpoints) - 1
    elif themecalc == "quartiles":
        numint=4
        # one for each quartile
        breakpoints = [min, q1, q2, q3, max]
        annotations = " %f; %f; %f; %f" % (q1, q2, q3, q4)
    elif themecalc == "custom_breaks":
        if not breakpoints:
            breakpoints = sys.stdin.read()
        breakpoints = [int(x) for x in breakpoints.split()]
        numint = len(breakpoints) - 1
        annotations = ""
    else:
        grass.fatal(_("Unknown themecalc type <%s>") % themecalc)

    pointstep = (maxsize - ptsize) / (numint - 1)

    # Prepare legend cuts for too large numint
    if numint > max_leg_items:
        xupper = int(numint - max_leg_items / 2) + 1
        xlower = int(max_leg_items / 2) + 1
    else:
        xupper = 0
        xlower = 0

    # legend title
    f_graph = file(tmp_graph, 'w')
    out(f_graph, locals(), """\
color 0:0:0
size 2 2
move 1 95
text Thematic map legend for column $column of map $map
size 1.5 1.8
move 4 90
text Value range: $min - $max
""")

    f_gisleg = file(tmp_gisleg, 'w')
    out(f_gisleg, locals(), """\
title - - - {Thematic map legend for column $column of map $map}
""")

    f_psleg = file(tmp_psleg, 'w')
    out(f_psleg, locals(), """\
text 1% 95% Thematic map legend for column $column of map $map
  ref bottom left
end
text 4% 90% Value range: $min - $max
  ref bottom left
end
""")

    msg(locals(), _("Thematic map legend for column $column of map $map"))
    msg(locals(), _("Value range: $min - $max"))

    colorschemes = {
        "blue-red":		("0:0:255",	"255:0:0"),
        "red-blue":		("255:0:0",	"0:0:255"),
        "green-red":	("0:255:0",	"255:0:0"),
        "red-green":	("255:0:0",	"0:255:0"),
        "blue-green":	("0:0:255",	"0:255:0"),
        "green-blue":	("0:255:0",	"0:0:255"),
        "cyan-yellow":	("0:255:255",	"255:255:0"),
        "yellow-cyan":	("255:255:0",	"0:255:255"),
        "custom_gradient":	(startcolor,	endcolor)
        }

    # open file for psmap instructions
    f_psmap = file(tmp_psmap, 'w')

    # graduated color thematic mapping
    if themetype == "graduated_colors":
        if colorscheme in colorschemes:
            startc, endc = colorschemes[colorscheme]
        # set color schemes for graduated color maps
        elif colorscheme == "single_color":
            if themetype == "graduated_points":
                startc = endc = linecolor
            else:
                startc = endc = pointcolor
        else:
            grass.fatal(_("This should not happen: parser error. Unknown color scheme %s") % colorscheme)

        color = __builtins__.map(int, startc.split(":"))
        endcolor = __builtins__.map(int, endc.split(":"))

        #The number of color steps is one less then the number of classes
        nclrstep = numint - 1
        clrstep = [(a - b) / nclrstep for a, b in zip(color, endcolor)]

        themecolor = startc

        # display graduated color themes
        if themecalc == "interval":
            out(f_graph, locals(), """\
move 4 87
text Mapped by $numint intervals of $step
""")

            out(f_gisleg, locals(), """\
subtitle - - - {Mapped by $numint intervals of $step}
""")

            out(f_psleg, locals(), """\
text 4% 87% Mapped by $numint intervals of $step
  ref bottom left
end
""")

            msg(locals(), _("Mapped by $numint intervals of $step"))

        # display graduated color themes for standard deviation units
        if themecalc == "std_deviation":
            out(f_graph, locals(), """\
move 4 87
text Mapped by standard deviation units of $sd (mean = $mean)
""")

            out(f_gisleg, locals(), """\
subtitle - - - {Mapped by standard deviation units of $sd (mean = $mean)}
""")

            out(f_psleg, locals(), """\
text 4% 87% Mapped by standard deviation units of $sd (mean = $mean)
  ref bottom left
end
""")

            msg(locals(), _("Mapped by standard deviation units of $sd (mean = $mean)"))

        # display graduated color themes for quartiles
        if themecalc == "quartiles":
            out(f_graph, locals(), """\
move 4 87
text Mapped by quartiles (median = $q2)
""")

            out(f_gisleg, locals(), """\
subtitle - - - {Mapped by quartiles (median = $q2)}
""")

            out(f_psleg, locals(), """\
text 4% 87% Mapped by quartiles (median = $q2)
  ref bottom left
end
""")

            msg(locals(), _("Mapped by quartiles (median = $q2)"))

        f_graph.write("""\
move 4 83
text Color
move 14 83
text Value
move 4 80
text =====
move 14 80
text ============
""")

        f_psleg.write("""\
text 4% 83% Color
  ref bottom left
end
text 14% 83% Value
  ref bottom left
end
text 4% 80% =====
  ref bottom left
end
text 14% 80% ============
  ref bottom left
end
""")

        sys.stdout.write("Color(R:G:B)\tValue\n")
        sys.stdout.write("============\t==========\n")

        line1 = 78
        line2 = 76
        line3 = 75

        i = 1
        first = True

        while i < numint:
            if flag_m:
                # math notation
                if first:
                    closebracket = "]"
                    openbracket = "["
                    mincomparison = ">="
                    first = False
                else:
                    closebracket = "]"
                    openbracket = "]"
                    mincomparison = ">"
            else:
                closebracket = "" 
                openbracket = ""
                if first:
                    mincomparison = ">="
                    first = False
                else:
                    mincomparison = ">"

            themecolor = ":".join(__builtins__.map(str,color))
            if flag_f:
                linecolor = "none"
            else:
                if type in ["line", "boundary"]:
                    linecolor = themecolor
                else:
                    linecolor = linecolor

            rangemin = __builtins__.min(breakpoints)
            rangemax = __builtins__.max(breakpoints)

            if not annotations:
                extranote = ""
            else:
                extranote = annotations[i]

            if i < xlower or i >= xupper:
                xline1 = line2 + 2
                xline3 = line2 - 1
                out(f_graph, locals(), """\
color $themecolor
polygon
5 $xline1
8 $xline1
8 $xline3
5 $xline3
color $linecolor
move 5 $xline1
draw 8 $xline1
draw 8 $xline3
draw 5 $xline3
draw 5 $xline1
move 14 $line2
color 0:0:0
text $openbracket$rangemin - $rangemax$closebracket $extranote
""")
            else:
                if i == xlower:
                    out(f_graph, locals(), """\
color 0:0:0
move 10 $line2
text ...
""")
                else:
                    #undo next increment
                    line2 += 4

            if i < xlower or i >= xupper:
                out(f_gisleg, locals(), """\
area $themecolor $linecolor - {$openbracket$rangemin - $rangemax$closebracket $extranote}
""")

                if type in ["line", "boundary"]:
                    out(f_psleg, locals(), """\
line 5% $xline1% 8% $xline1%
  color $linecolor
end
text 14% $xline1% $openbracket$rangemin - $rangemax$closebracket $extranote
  ref center left
end
""")
                elif type in ["point", "centroid"]:
                    out(f_psleg, locals(), """\
point 8% $xline1%
  color $linecolor
  fcolor $themecolor
  size $size
  symbol $icon
end
text 14% $xline1% $openbracket$rangemin - $rangemax$closebracket $extranote
  ref center left
end
""")
                else:
                    out(f_psleg, locals(), """\
rectangle 5% $xline1% 8% $xline3%
  color 0:0:0
  fcolor $themecolor
end
text 14% $xline3% $openbracket$rangemin - $rangemax$closebracket DCADCA $extranote
  ref bottom left
end
""")
            else:
                if i == xlower:
                    out(f_psleg, locals(), """\
color 0:0:0
text 14% $xline3% ...
  ref bottom left
end
""")

                f_gisleg.write("text - - - {...}\n")

            sys.stdout.write(subs(locals(), "$themecolor\t\t$openbracket$rangemin - $rangemax$closebracket $extranote\n"))
            if not where:
                sqlwhere = subs(locals(), "$column $mincomparison $rangemin AND $column <= $rangemax")
            else:
                sqlwhere = subs(locals(), "$column $mincomparison $rangemin AND $column <= $rangemax AND $where")

            # update color to database?
            if flag_u:
                sql = subs(locals(), "UPDATE $table SET GRASSRGB = '$themecolor' WHERE $sqlwhere")
                grass.write_command('db.execute', database = database, driver = driver, stdin = sql)

            # Create group for GIS Manager
            if flag_g:
                # change rgb colors to hex
                xthemecolor = "#%02X%02X%02X" % tuple(__builtins__.map(int, themecolor.split(":")))
                #xlinecolor=`echo $linecolor | awk -F: '{printf("#%02X%02X%02X\n",$1,$2,$3)}'`

                if "$linecolor" == "black":
                    xlinecolor = "#000000"
                else:
                    xlinecolor = xthemecolor


                # create group entry
                out(f_group, locals(), """\
  _check 1
  Vector $column = $rangemin - $rangemax
    _check 1
    map $map
    display_shape 1
    display_cat 0
    display_topo 0
    display_dir 0
    display_attr 0
    type_point $ptype
    type_line $ltype
    type_boundary $btype
    type_centroid $ctype
    type_area $atype
    type_face 0
    color $xlinecolor
    fcolor $xthemecolor
    width $ptsize
    _use_fcolor 1
    lcolor #000000
    sqlcolor 0
    icon $icon
    size $ptsize
    field $layer
    lfield $layer
    attribute
    xref left
    yref center
    lsize 8
    cat
    where $sqlwhere
    _query_text 0
    _query_edit 1
    _use_where 1
    minreg
    maxreg
    _width 0.1
  End
""")

            # display theme vector map

            grass.run_command('d.vect', map = map, type = type, layer = layer,
                              where = sqlwhere,
                              color = linecolor, fcolor = themecolor, icon = icon, size = ptsize)

            if type in ["line", "boundary"]:
                out(f_psmap, locals(), """\
vlines $map
  type $type
  layer $layer
  where $sqlwhere
  color $linecolor
  label $rangemin - $rangemax
end
""")
            elif type in ["point", "centroid"]:
                out(f_psmap, locals(), """\
vpoints $map
  type $type
  layer $layer
  where $sqlwhere
  color $linecolor
  fcolor $themecolor
  symbol $icon
  label $rangemin - $rangemax
end
""")
            else:
                out(f_psmap, locals(), """\
vareas $map
  layer $layer
  where $sqlwhere
  color $linecolor
  fcolor $themecolor
  label $rangemin - $rangemax
end
""")

            # increment for next theme
            i += 1
            if i == numint:
                color = endcolor
            else:
                color = [a - b for a, b in zip(color, clrstep)]
            line1 -= 4
            line2 -= 4
            line3 -= 4

    #graduated points and line widths thematic mapping

    if themetype in ["graduated_points", "graduated_lines"]:

        #display graduated points/lines by intervals
        if themecalc == "interval":
            out(f_graph, locals(), """\
move 4 87
text Mapped by $numint intervals of $step
""")

            out(f_gisleg, locals(), """\
subtitle - - - {Mapped by $numint intervals of $step}
""")

            out(f_psleg, locals(), """\
text 4% 87% Mapped by $numint intervals of $step
  ref bottom left
end
""")

            msg(locals(), _("Mapped by $numint intervals of $step"))

        # display graduated points/lines for standard deviation units
        if themecalc == "std_deviation":

            out(f_graph, locals(), """\
move 4 87
text Mapped by standard deviation units of $sd (mean = $mean)
""")

            out(f_gisleg, locals(), """\
subtitle - - - {Mapped by standard deviation units of $sd (mean = $mean)}
""")

            out(f_psleg, locals(), """\
text 4% 87% Mapped by standard deviation units of $sd (mean = $mean)
  ref bottom left
end
""")

            msg(locals(), _("Mapped by standard deviation units of $sd (mean = $mean)"))

        # display graduated points/lines for quartiles
        if themecalc == "quartiles":

            out(f_graph, locals(), """\
move 4 87
text Mapped by quartiles (median = $q2)
""")

            out(f_gisleg, locals(), """\
subtitle - - - {Mapped by quartiles (median = $q2)}
""")

            out(f_psleg, locals(), """\
text 4% 87% Mapped by quartiles (median = $q2)
  ref bottom left
end
""")

            msg(locals(), _("Mapped by quartiles (median = $q2)"))

        line1 = 76
        line2 = 75

        out(f_graph, locals(), """\
move 4 83
text Size/width
move 25 83
text Value
move 4 80
text ==============
move 25 80
text ==============
""")

        out(f_psleg, locals(), """\
text 4% 83% Icon size
  ref bottom left
end
text 25% 83% Value
  ref bottom left
end
text 4% 80% ============
  ref bottom left
end
text 25% 80% ============
  ref bottom left
end
""")


        sys.stdout.write("Size/width\tValue\n")
        sys.stdout.write("==========\t=====\n")

        themecolor = pointcolor

        if flag_f:
            linecolor = "none"

        i = numint
        ptsize = maxsize

        while i >= 1:
            if flag_m:
                # math notation
                if i == 1:
                    closebracket = "]"
                    openbracket = "["
                    mincomparison = ">="
                else:
                    closebracket = "]"
                    openbracket = "]"
                    mincomparison = ">"
            else:
                closebracket = "" 
                openbracket = ""
                if i == 1:
                    mincomparison = ">="
                else:
                    mincomparison = ">"

            themecolor = pointcolor

            if flag_f:
                linecolor = "none"

            rangemin = __builtins__.min(breakpoints)
            rangemax = __builtins__.max(breakpoints)

            if not annotations:
                extranote = ""
            else:
                extranote = annotations[i]

            iconsize = int(ptsize / 2)
            lineht = int(ptsize / 4)
            if lineht < 4:
                lineht = 4

            if i < xlower or i >= xupper:
                if themetype == "graduated_lines":
                    out(f_graph, locals(), """\
color $linecolor
""")

                    out(f_gisleg, locals(), """\
line $themecolor $linecolor $ptsize {$openbracket$rangemin - $rangemax$closebracket $extranote}
""")
                else:
                    out(f_graph, locals(), """\
color $themecolor
""")
                    out(f_gisleg, locals(), """\
point $themecolor $linecolor $ptsize {$openbracket$rangemin - $rangemax$closebracket $extranote}
""")

                out(f_graph, locals(), """\
icon + $iconsize 5 $line1
color 0:0:0
move 10 $line2
text $ptsize pts
move 25 $line2
text $openbracket$rangemin - $rangemax$closebracket $extranote
""")
            else:
                if i == xlower:
                    out(f_graph, locals(), """\
color 0:0:0
move 10 $line2
text ...
""")

                    out(f_gisleg, locals(), """\
text - - - ...
""")
                else:
                    # undo future line increment
                    line2 += lineht

            if i < xlower or i >= xupper:
                out(f_psleg, locals(), """\
point 8% $line1%
  color $linecolor
  fcolor $themecolor
  size $iconsize
  symbol $icon
end
text 25% $line1% $openbracket$rangemin - $rangemax$closebracket $extranote
  ref center left
end
""")
            else:
                if i == xlower:
                    out(f_psleg, locals(), """\
text 25% $xline1% ...
   ref center left
end
""")

            sys.stdout.write(subs(locals(), "$ptsize\t\t$openbracket$rangemin - $rangemax$closebracket $extranote\n"))

            if not where:
                sqlwhere = subs(locals(), "$column $mincomparison $rangemin AND $column <= $rangemax")
            else:
                sqlwhere = subs(locals(), "$column $mincomparison $rangemin AND $column <= $rangemax AND $where")

            # update color to database?
            if flag_u:
                sql = subs(locals(), "UPDATE $table SET grassrgb = '$themecolor' WHERE $sqlwhere")
                grass.write_command('db.execute', database = database, driver = driver, stdin = sql)

            # Create group for GIS Manager
            if flag_g:
                # change rgb colors to hex
                xthemecolor = "#%02X%02X%02X" % tuple(__builtins__.map(int,themecolor.split(":")))
                xlinecolor = "#000000"

                # create group entry
                out(f_group, locals(), """\
  _check 1
  Vector $column = $rangemin - $rangemax
    _check 1
    map $map
    display_shape 1
    display_cat 0
    display_topo 0
    display_dir 0
    display_attr 0
    type_point $ptype
    type_line $ltype
    type_boundary $btype
    type_centroid $ctype
    type_area $atype
    type_face 0
    color $xlinecolor
    width $ptsize
    fcolor $xthemecolor
    _use_fcolor 1
    lcolor #000000
    sqlcolor 0
    icon $icon
    size $ptsize
    field $layer
    lfield $layer
    attribute
    xref left
    yref center
    lsize 8
    cat
    where $sqlwhere
    _query_text 0
    _query_edit 1
    _use_where 1
    minreg
    maxreg
    _width 0.1
  End
""")

            #graduates line widths or point sizes

            if themetype == "graduated_lines":
                grass.run_command('d.vect', map = map, type = type, layer = layer,
                                  where = sqlwhere,
                                  color = linecolor, fcolor = themecolor, icon = icon, size = ptsize,
                                  width = ptsize)

            else:
                grass.run_command('d.vect', map = map, type = type, layer = layer,
                                  where = sqlwhere,
                                  color = linecolor, fcolor = themecolor, icon = icon, size = ptsize)

                out(f_psmap, locals(), """\
vpoints $map
  type $type
  layer $layer
  where $sqlwhere
  color $linecolor
  fcolor $themecolor
  symbol $icon
  size $ptsize
  label $rangemin - $rangemax
end
""")

            ptsize -= pointstep

            line1 -= lineht
            line2 -= lineht
            i -= 1

    # Create graphic legend
    f_graph.close()
    if flag_l:
        grass.run_command('d.erase')
        grass.run_command('d.graph', input = tmp_graph)

    # Create group file for GIS Manager
    f_group.write("End\n")
    f_group.close()
    if flag_g:
        shutil.copyfile(tmp_group, "%s.dm" % group)

    # Create ps.map map file
    f_psmap.write("end\n")
    f_psmap.close()
    if psmap:
        shutil.copyfile(tmp_psmap, "%s.psmap" % psmap)

    # Create ps.map legend file
    f_psleg.write("end\n")
    f_psleg.close()
    if psmap:
        shutil.copyfile(tmp_psleg, "%s_legend.psmap" % psmap)

    # Create text file to use with d.graph in GIS Manager
    f_gisleg.close()
    if flag_s:
        tmpdir = os.path.dirname(tmp_gisleg)
        tlegfile = os.path.join(tmpdir, "gismlegend.txt")
        shutil.copyfile(tmp_gisleg, tlegfile)
示例#50
0
def main():
    vector = options["map"]
    layer = options["layer"]
    column = options["column"]
    value = options["value"]
    qcolumn = options["query_column"]
    where = options["where"]
    sqlitefile = options["sqliteextra"]

    mapset = grass.gisenv()["MAPSET"]

    # does map exist in CURRENT mapset?
    if not grass.find_file(vector, element="vector", mapset=mapset)["file"]:
        grass.fatal(_("Vector map <%s> not found in current mapset") % vector)

    try:
        f = grass.vector_db(vector)[int(layer)]
    except KeyError:
        grass.fatal(_("There is no table connected to this map. Run v.db.connect or v.db.addtable first."))

    table = f["table"]
    database = f["database"]
    driver = f["driver"]

    # check for SQLite backend for extra functions
    if sqlitefile and driver != "sqlite":
        grass.fatal(_("Use of libsqlitefunctions only with SQLite backend"))
    if driver == "sqlite" and sqlitefile:
        if not os.access(sqlitefile, os.R_OK):
            grass.fatal(_("File <%s> not found") % sqlitefile)

    # checking column types
    try:
        coltype = grass.vector_columns(vector, layer)[column]["type"]
    except KeyError:
        grass.fatal(_("Column <%s> not found") % column)

    if qcolumn:
        if value:
            grass.fatal(_("<value> and <qcolumn> are mutually exclusive"))
        # special case: we copy from another column
        value = qcolumn
    else:
        if not value:
            grass.fatal(_("Either <value> or <qcolumn> must be given"))
        # we insert a value
        if coltype.upper() not in ["INTEGER", "DOUBLE PRECISION"]:
            value = "'%s'" % value

    cmd = "UPDATE %s SET %s=%s" % (table, column, value)
    if where:
        cmd += " WHERE " + where

    # SQLite: preload extra functions from extension lib if provided by user
    if sqlitefile:
        sqliteload = "SELECT load_extension('%s');\n" % sqlitefile
        cmd = sqliteload + cmd

    grass.verbose('SQL: "%s"' % cmd)
    grass.write_command("db.execute", input="-", database=database, driver=driver, stdin=cmd)

    # write cmd history:
    grass.vector_history(vector)

    return 0