Example #1
def OnUniqueValues(self, event, justsample=False):
    """!Get unique values"""
    vals = []
    try:
        idx = self.list_columns.GetSelections()[0]
        column = self.list_columns.GetString(idx)
    except:
        self.list_values.Clear()
        return

    self.list_values.Clear()

    querystring = "SELECT %s FROM %s" % (column, self.tablename)

    data = grass.db_select(table=self.tablename,
                           sql=querystring,
                           database=self.database,
                           driver=self.driver)
    i = 0
    for line in data:
        if justsample and i < 256 or \
           not justsample:
            self.list_values.Append(line.strip())
        else:
            break
        i += 1
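Note that Example #1 passes both table= and sql= to grass.db_select(); in grass.script the sql argument takes precedence, so the table keyword is effectively redundant here. For orientation, a minimal standalone sketch of the same call pattern, assuming an active GRASS session and a hypothetical table mytable with a column myvalue:

import grass.script as grass

# db_select() returns the query result as a tuple of row tuples,
# with every value delivered as a string
rows = grass.db_select(sql="SELECT myvalue FROM mytable")  # hypothetical names
unique_values = sorted(set(row[0] for row in rows))
print(unique_values)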
Example #2
    def OnUniqueValues(self, event, justsample=False):
        """Get unique values"""
        vals = []
        try:
            idx = self.list_columns.GetSelections()[0]
            column = self.list_columns.GetString(idx)
        except:
            self.list_values.Clear()
            return

        self.list_values.Clear()

        data = grass.db_select(sql="SELECT %s FROM %s" %
                               (column, self.tablename),
                               database=self.database,
                               driver=self.driver,
                               sep='{_sep_}')
        if not data:
            return

        desc = self.dbInfo.GetTableDesc(self.dbInfo.GetTable(
            self.layer))[column]

        i = 0
        for item in sorted(set(map(lambda x: desc['ctype'](x[0]), data))):
            if justsample and i > 255:
                break

            if desc['type'] != 'character':
                item = str(item)
            else:
                item = GetUnicodeValue(item)
            self.list_values.Append(item)
            i += 1
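Note the sep='{_sep_}' keyword used in Examples #2, #3 and #5: grass.db_select() splits each returned record on this separator before handing rows back, so passing an unlikely token guards against attribute values that themselves contain the default '|' character.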
Example #3
def OnUniqueValues(self, event, justsample=False):
    """Get unique values"""
    vals = []
    try:
        idx = self.list_columns.GetSelections()[0]
        column = self.list_columns.GetString(idx)
    except:
        self.list_values.Clear()
        return

    self.list_values.Clear()

    data = grass.db_select(sql="SELECT %s FROM %s" % (column, self.tablename),
                           database=self.database,
                           driver=self.driver, sep='{_sep_}')
    if not data:
        return

    desc = self.dbInfo.GetTableDesc(self.dbInfo.GetTable(self.layer))[column]

    i = 0
    for item in sorted(set(map(lambda x: desc['ctype'](x[0]), data))):
        if justsample and i > 255:
            break

        if desc['type'] != 'character':
            item = str(item)
        else:
            item = GetUnicodeValue(item)
        self.list_values.Append(item)
        i += 1
Example #4
def export(self):
    logging.debug("Shapes computation started")
    start = time.time()

    gisenv = gscript.gisenv()

    # query shapes
    sql = 'select min'
    for stype in self.shapetype:
        sql += ',typ{}'.format(stype)
    sql += ' from tvary'
    shapes = gscript.db_select(sql=sql, driver='sqlite',
                               database=os.path.join(
                                   gisenv['GISDBASE'], gisenv['LOCATION_NAME'],
                                   self.mapset, 'sqlite/sqlite.db'))

    # query map attributes
    # list() is needed so that insert() also works under Python 3
    columns = list(map(lambda x: 'H_{}T{}'.format(x, self.rainlength), self.return_period))
    columns.insert(0, self.keycolumn)
    data = gscript.vector_db_select(map=self.map_name, columns=','.join(columns))

    # export csv
    self.output_file = '{}/{}.csv'.format(self.output_dir, self.map_name)
    with open(self.output_file, 'w') as fd:
        self.export_csv(fd, data, shapes)
    # output_zfile = self.output_file + '.zip'
    # os.chdir(self.output_dir)
    # with ZipFile(output_zfile, 'w') as fzip:
    #     fzip.write('{}'.format(os.path.basename(self.output_file)))
    # self.output_csv.setValue(output_zfile)

    # export png (graph)
    ### TODO

    logging.info("Shapes calculated successfully: {} sec".format(time.time() - start))

    return self.output_file
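A sketch of the database path construction used in Examples #4 and #6, assuming the default per-mapset SQLite backend ($GISDBASE/$LOCATION_NAME/<mapset>/sqlite/sqlite.db); the tvary table comes from the example, while the path components here are taken from gscript.gisenv() rather than an instance attribute:

import os
import grass.script as gscript

gisenv = gscript.gisenv()
# path of the per-mapset SQLite database (assumes the default backend layout)
db_path = os.path.join(gisenv['GISDBASE'], gisenv['LOCATION_NAME'],
                       gisenv['MAPSET'], 'sqlite', 'sqlite.db')
shapes = gscript.db_select(sql='SELECT min FROM tvary',
                           driver='sqlite', database=db_path)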
Example #5
    def OnUniqueValues(self, event, justsample=False):
        """Get unique values"""
        vals = []
        try:
            idx = self.list_columns.GetSelections()[0]
            column = self.list_columns.GetString(idx)
        except:
            self.list_values.Clear()
            return

        self.list_values.Clear()

        sql = "SELECT DISTINCT {column} FROM {table} ORDER BY {column}".format(
            column=column, table=self.tablename)
        if justsample:
            sql += " LIMIT {}".format(255)
        data = grass.db_select(
            sql=sql,
            database=self.database,
            driver=self.driver,
            sep='{_sep_}')
        if not data:
            return

        desc = self.dbInfo.GetTableDesc(
            self.dbInfo.GetTable(self.layer))[column]

        i = 0
        items = []
        for item in data: #sorted(set(map(lambda x: desc['ctype'](x[0]), data))):
            if desc['type'] not in ('character', 'text'):
                items.append(str(item[0]))
            else:
                items.append(u"'{}'".format(GetUnicodeValue(item[0])))
            i += 1

        self.list_values.AppendItems(items)
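Unlike Examples #2 and #3, which deduplicate and sort on the Python side, Example #5 pushes that work into the database with DISTINCT, ORDER BY and an optional LIMIT. A minimal sketch of the SQL-side variant (hypothetical column and table names):

import grass.script as grass

# dedup, ordering, and sampling done by the database rather than in Python
sql = "SELECT DISTINCT name FROM mytable ORDER BY name LIMIT 255"
rows = grass.db_select(sql=sql)  # uses the current default DB connection
values = [row[0] for row in rows]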
Example #6
def export(self):
    self.shapetypes = self.shapetype.getValue().split(',')

    logging.debug("Shapes computation started")
    start = time.time()

    gisenv = gscript.gisenv()

    # query shapes
    sql = 'select min'
    for stype in self.shapetypes:
        sql += ',typ{}'.format(stype)
    sql += ' from tvary'
    shapes = gscript.db_select(sql=sql, driver='sqlite',
                               database=os.path.join(gisenv['GISDBASE'], gisenv['LOCATION_NAME'],
                                                     self.mapset, 'sqlite/sqlite.db'))

    # query map attributes
    # list() is needed so that insert() also works under Python 3
    columns = list(map(lambda x: 'H_{}T{}'.format(x, self.rainlength_value), self.rasters))
    columns.insert(0, self.keycolumn.getValue())
    data = gscript.vector_db_select(map=self.map_name, columns=','.join(columns))

    # export csv
    self.output_file = '{}/{}.csv'.format(self.output_dir, self.map_name)
    with open(self.output_file, 'w') as fd:
        self.export_csv(fd, data, shapes)
    # output_zfile = self.output_file + '.zip'
    # os.chdir(self.output_dir)
    # with ZipFile(output_zfile, 'w') as fzip:
    #     fzip.write('{}'.format(os.path.basename(self.output_file)))
    # self.output_csv.setValue(output_zfile)
    self.output_csv.setValue(self.output_file)

    # export png (graph)
    ### TODO

    logging.info("Shapes calculated successfully: {} sec".format(time.time() - start))
Example #7
def main():
    options, unused = grass.parser()

    mapName = options['input']
    trainingMapName = options['training']

    columnWithClass = options['class_column']

    useAllColumns = True
    if options['columns']:
        # columns as string
        columns = options['columns'].strip()
        useAllColumns = False

    # TODO: allow same input and output map only if --overwrite was specified
    # TODO: does adding a column count as overwriting, or is overwriting only updating an existing one?

    # variable names connected to training dataset have training prefix
    # variable names connected to classified dataset have no prefix

    # checking database connection (if map has a table)
    # TODO: layer
    checkDbConnection(trainingMapName)
    checkDbConnection(mapName)

    # loading descriptions first to check them

    trainingTableDescription = grass.db_describe(table=trainingMapName)

    if useAllColumns:
        trainingMinNcols = 3
        checkNcols(trainingMapName, trainingTableDescription, trainingMinNcols)
    else:
        pass

    checkNrows(trainingMapName, trainingTableDescription)

    if not hasColumn(trainingTableDescription, columnWithClass):
        fatal_noClassColumn(trainingMapName, columnWithClass)

    tableDescription = grass.db_describe(table=mapName)

    if useAllColumns:
        minNcols = 2
        checkNcols(mapName, tableDescription, minNcols)
    else:
        pass

    checkNrows(mapName, tableDescription)

    # TODO: check same (+-1) number of columns

    # loading data

    # TODO: make fun from this
    if useAllColumns:
        dbTable = grass.db_select(table=trainingMapName)
    else:
        # assuming that columns are concatenated by commas
        sql = 'SELECT %s,%s FROM %s' % (columnWithClass, columns,
                                        trainingMapName)
        dbTable = grass.db_select(sql=sql)

    trainingParameters = fromDbTableToSimpleTable(
        dbTable,
        columnsDescription=trainingTableDescription['cols'],
        columnWithClass=columnWithClass)

    if useAllColumns:
        trainingClasses = extractColumnWithClass(
            dbTable,
            columnsDescription=trainingTableDescription['cols'],
            columnWithClass=columnWithClass)
    else:
        # FIXME: magic num?
        trainingClasses = extractNthColumn(dbTable, 0)

    # TODO: hard coded 'cat'?
    if useAllColumns:
        dbTable = grass.db_select(table=mapName)
    else:
        # assuming that columns are concatenated by commas
        sql = 'SELECT %s,%s FROM %s' % ('cat', columns, mapName)
        dbTable = grass.db_select(sql=sql)

    parameters = fromDbTableToSimpleTable(
        dbTable,
        columnsDescription=tableDescription['cols'],
        columnWithClass=columnWithClass)
    if useAllColumns:
        cats = extractColumnWithCats(
            dbTable, columnsDescription=tableDescription['cols'])
    else:
        cats = extractNthColumn(dbTable, 0)

    # since dbTable can be big, it is better to avoid having it in memory twice
    del dbTable
    del trainingTableDescription

    classifier = Classifier()
    classifier.learn(trainingParameters, trainingClasses)
    classes = classifier.pred(parameters)

    # add the column only if it does not exist and the classification was successful
    if not hasColumn(tableDescription, columnWithClass):
        addColumn(mapName, columnWithClass, 'int')

    updateColumn(mapName, columnWithClass, cats, classes)
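Examples #7 and #12 lean on grass.db_describe() to validate the attribute tables before loading them. A sketch of the structure it returns, under the assumption (based on grass.script behaviour) that 'cols' is a list of [name, type, length] entries alongside 'ncols'/'nrows' counts:

import grass.script as grass

desc = grass.db_describe(table='mytable')  # hypothetical table name
# assumed keys: 'cols' as [name, type, length] triples, plus row/column counts
column_names = [col[0] for col in desc['cols']]
print(desc['ncols'], desc['nrows'], column_names)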
Example #8
def main():
    """
    Build gravity reservoirs in GSFLOW: combines MODFLOW grid and HRU sub-basins
    These define the PRMS soil zone that connects to MODFLOW cells
    """

    ##################
    # OPTION PARSING #
    ##################

    # Parser
    options, flags = gscript.parser()

    # Input
    reaches = options['reaches_input']
    segments = options['segments_input']
    gravity_reservoirs = options['gravres_input']
    HRUs = options['hru_input']
    pour_point = options['pour_point_input']
    bc_cell = options['bc_cell_input']

    # Output
    out_reaches = options['reaches_output']
    out_segments = options['segments_output']
    out_gravity_reservoirs = options['gravres_output']
    out_HRUs = options['hru_output']
    out_pour_point_boundary = options['pour_point_boundary_output']

    ##############
    # PROCESSING #
    ##############

    # Reaches
    ##########
    if (len(reaches) > 0) and (len(out_reaches) > 0):
        columns_in_order = [
            'KRCH', 'IRCH', 'JRCH', 'ISEG', 'IREACH', 'RCHLEN', 'STRTOP',
            'SLOPE', 'STRTHICK', 'STRHC1', 'THTS', 'THTI', 'EPS', 'UHC'
        ]
        outcols = get_columns_in_order(reaches, columns_in_order)
        outarray = np.array(outcols).transpose()
        outtable = np.vstack((columns_in_order, outarray))
        np.savetxt(out_reaches + '.txt', outtable, fmt='%s', delimiter=',')
    elif (len(reaches) > 0) or (len(out_reaches) > 0):
        gscript.fatal(_("You must include both input and output reaches"))

    # Segments
    ###########
    if (len(segments) > 0) and (len(out_segments) > 0):
        columns_in_order = [
            'NSEG', 'ICALC', 'OUTSEG', 'IUPSEG', 'IPRIOR', 'NSTRPTS', 'FLOW',
            'RUNOFF', 'ETSW', 'PPTSW', 'ROUGHCH', 'ROUGHBK', 'CDPTH', 'FDPTH',
            'AWDTH', 'BWDTH'
        ]
        outcols = get_columns_in_order(segments, columns_in_order)
        outarray = np.array(outcols).transpose()
        outtable = np.vstack((columns_in_order, outarray))
        np.savetxt(out_segments + '_4A_INFORMATION.txt',
                   outtable,
                   fmt='%s',
                   delimiter=',')

        columns_in_order = [
            'HCOND1', 'THICKM1', 'ELEVUP', 'WIDTH1', 'DEPTH1', 'THTS1',
            'THTI1', 'EPS1', 'UHC1'
        ]
        outcols = get_columns_in_order(segments, columns_in_order)
        outarray = np.array(outcols).transpose()
        outtable = np.vstack((columns_in_order, outarray))
        np.savetxt(out_segments + '_4B_UPSTREAM.txt',
                   outtable,
                   fmt='%s',
                   delimiter=',')

        columns_in_order = [
            'HCOND2', 'THICKM2', 'ELEVDN', 'WIDTH2', 'DEPTH2', 'THTS2',
            'THTI2', 'EPS2', 'UHC2'
        ]
        outcols = get_columns_in_order(segments, columns_in_order)
        outarray = np.array(outcols).transpose()
        outtable = np.vstack((columns_in_order, outarray))
        np.savetxt(out_segments + '_4C_DOWNSTREAM.txt',
                   outtable,
                   fmt='%s',
                   delimiter=',')
    elif (len(segments) > 0) or (len(out_segments) > 0):
        gscript.fatal(_("You must include both input and output segments"))

    # Gravity reservoirs
    #####################
    if (len(gravity_reservoirs) > 0) and (len(out_gravity_reservoirs) > 0):
        columns_in_order = [
            'gvr_hru_id', 'gvr_hru_pct', 'gvr_cell_id', 'gvr_cell_pct'
        ]
        outcols = get_columns_in_order(gravity_reservoirs, columns_in_order)
        outarray = np.array(outcols).transpose()
        outtable = np.vstack((columns_in_order, outarray))
        np.savetxt(out_gravity_reservoirs + '.txt',
                   outtable,
                   fmt='%s',
                   delimiter=',')
    elif (len(gravity_reservoirs) > 0) or (len(out_gravity_reservoirs) > 0):
        gscript.fatal(
            _("You must include both input and output gravity reservoirs"))

    # HRUs
    #######
    if (len(HRUs) > 0) and (len(out_HRUs) > 0):
        columns_in_order = [
            'hru_area', 'hru_aspect', 'hru_elev', 'hru_lat', 'hru_slope',
            'hru_segment', 'hru_strmseg_down_id', 'cov_type', 'soil_type'
        ]
        outcols = get_columns_in_order(HRUs, columns_in_order)
        outarray = np.array(outcols).transpose()
        outtable = np.vstack((columns_in_order, outarray))
        np.savetxt(out_HRUs + '.txt', outtable, fmt='%s', delimiter=',')
    elif (len(HRUs) > 0) or (len(out_HRUs) > 0):
        gscript.fatal(_("You must include both input and output HRUs"))

    # Pour Point and Boundary Condition Cell (downstream from pour point)
    ######################################################################
    if (len(out_pour_point_boundary) > 0):
        # Pour point
        if (len(pour_point) > 0):
            _y, _x = np.squeeze(
                gscript.db_select(sql='SELECT row,col FROM ' + pour_point))
            outstr = 'discharge_pt: row_i ' + _y + ' col_i ' + _x
            if (len(bc_cell) > 0):
                outstr += '\n'
            outfile = open(out_pour_point_boundary + ".txt", "w")
            outfile.write(outstr)
        # Boundary condition
        if (len(bc_cell) > 0):
            outfile = open(out_pour_point_boundary + ".txt", "a")
            _xys = np.squeeze(
                gscript.db_select(sql='SELECT row,col FROM ' + bc_cell))
            # if only one point (so was on N-S, W-E direct connection),
            # expand dimensions so code below works
            if _xys.ndim < 2:
                _xys = np.expand_dims(_xys, axis=0)
            gscript.message(_xys)
            for _cell_coords in _xys:
                _y, _x = _cell_coords
                outstr = 'boundary_condition_pt: row_i ' + _y + ' col_i ' + _x
                if not (_cell_coords == _xys[-1]).all():
                    outstr += '\n'
                outfile.write(outstr)
            outfile.close()
    if (len(pour_point) == 0) and (len(bc_cell) == 0):
        gscript.fatal(_("You must include input and output pp's and/or bc's"))
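db_select() returns every value as a string, which is why Examples #8 and #10 can concatenate the queried row/col directly into the output line. A sketch of that step in isolation (table name hypothetical):

import numpy as np
import grass.script as gscript

# one row of two string values -> squeezed to a 1-D array of strings
_y, _x = np.squeeze(gscript.db_select(sql="SELECT row,col FROM pour_point"))
outstr = "discharge_pt: row_i " + _y + " col_i " + _x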
Example #9
def main():
    vmap = options['map']
    curr_mapset = grass.gisenv()['MAPSET']
    mapset = grass.find_file(name = vmap,
                             element = 'vector')['mapset']

    # check if map exists in the current mapset
    if not mapset:
        grass.fatal(_("Vector map <%s> not found") % vmap)
    if mapset != curr_mapset:
        grass.fatal(_("Vector map <%s> not found in the current mapset") % vmap)

    # check for format
    vInfo = grass.vector_info(vmap)
    if vInfo['format'] != 'PostGIS,PostgreSQL':
        grass.fatal(_("Vector map <%s> is not a PG-link") % vmap)

    # default connection
    global pg_conn
    pg_conn = {'driver': 'pg',
                'database': vInfo['pg_dbname']}

    # default topo schema
    if not options['topo_schema']:
        options['topo_schema'] = 'topo_%s' % options['map']

    # check if topology schema already exists
    topo_found = False
    ret = grass.db_select(sql = "SELECT COUNT(*) FROM topology.topology "
                              "WHERE name = '%s'" % options['topo_schema'],
                          **pg_conn)

    if not ret or int(ret[0][0]) == 1:
        topo_found = True

    if topo_found:
        if int(os.getenv('GRASS_OVERWRITE', '0')) == 1:
            # -> overwrite
            grass.warning(_("Topology schema <%s> already exists and will be overwritten") %
                              options['topo_schema'])
        else:
            grass.fatal(_("option <%s>: <%s> exists.") %
                            ('topo_schema', options['topo_schema']))

        # drop topo schema if exists
        execute(sql = "SELECT topology.DropTopology('%s')" % options['topo_schema'],
                msg = _("Unable to remove topology schema"))

    # create topo schema
    schema, table = vInfo['pg_table'].split('.')
    grass.message(_("Creating new topology schema..."))
    execute("SELECT topology.createtopology('%s', find_srid('%s', '%s', '%s'), %s)" %
                (options['topo_schema'], schema, table, vInfo['geometry_column'], options['tolerance']))

    # add topo column to the feature table
    grass.message(_("Adding new topology column..."))
    execute("SELECT topology.AddTopoGeometryColumn('%s', '%s', '%s', '%s', '%s')" %
                (options['topo_schema'], schema, table, options['topo_column'], vInfo['feature_type']))

    # build topology
    grass.message(_("Building PostGIS topology..."))
    execute("UPDATE %s.%s SET %s = topology.toTopoGeom(%s, '%s', 1, %s)" %
                (schema, table, options['topo_column'], vInfo['geometry_column'],
                 options['topo_schema'], options['tolerance']),
            useSelect = False)

    # report summary
    execute("SELECT topology.TopologySummary('%s')" % options['topo_schema'])

    return 0
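Because grass.db_select() forwards extra keyword arguments to the underlying db.select call, Examples #9 and #11 keep the driver/database pair in a single dict and splat it into every query. A sketch of that pattern (database name hypothetical):

import grass.script as grass

pg_conn = {'driver': 'pg', 'database': 'grassdb'}  # hypothetical PostGIS DB
ret = grass.db_select(sql="SELECT COUNT(*) FROM topology.topology", **pg_conn)
count = int(ret[0][0])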
Example #10
def main():
    """
    Build gravity reservoirs in GSFLOW: combines MODFLOW grid and HRU sub-basins
    These define the PRMS soil zone that connects to MODFLOW cells
    """

    ##################
    # OPTION PARSING #
    ##################

    # Parser
    options, flags = gscript.parser()

    # Input
    reaches = options["reaches_input"]
    segments = options["segments_input"]
    gravity_reservoirs = options["gravres_input"]
    HRUs = options["hru_input"]
    pour_point = options["pour_point_input"]
    bc_cell = options["bc_cell_input"]

    # Output
    out_reaches = options["reaches_output"]
    out_segments = options["segments_output"]
    out_gravity_reservoirs = options["gravres_output"]
    out_HRUs = options["hru_output"]
    out_pour_point_boundary = options["pour_point_boundary_output"]

    ##############
    # PROCESSING #
    ##############

    # Reaches
    ##########
    if (len(reaches) > 0) and (len(out_reaches) > 0):
        columns_in_order = [
            "KRCH",
            "IRCH",
            "JRCH",
            "ISEG",
            "IREACH",
            "RCHLEN",
            "STRTOP",
            "SLOPE",
            "STRTHICK",
            "STRHC1",
            "THTS",
            "THTI",
            "EPS",
            "UHC",
        ]
        outcols = get_columns_in_order(reaches, columns_in_order)
        outarray = np.array(outcols).transpose()
        outtable = np.vstack((columns_in_order, outarray))
        np.savetxt(out_reaches + ".txt", outtable, fmt="%s", delimiter=",")
    elif (len(reaches) > 0) or (len(out_reaches) > 0):
        gscript.fatal(_("You must include both input and output reaches"))

    # Segments
    ###########
    if (len(segments) > 0) and (len(out_segments) > 0):
        columns_in_order = [
            "NSEG",
            "ICALC",
            "OUTSEG",
            "IUPSEG",
            "IPRIOR",
            "NSTRPTS",
            "FLOW",
            "RUNOFF",
            "ETSW",
            "PPTSW",
            "ROUGHCH",
            "ROUGHBK",
            "CDPTH",
            "FDPTH",
            "AWDTH",
            "BWDTH",
        ]
        outcols = get_columns_in_order(segments, columns_in_order)
        outarray = np.array(outcols).transpose()
        outtable = np.vstack((columns_in_order, outarray))
        np.savetxt(out_segments + "_4A_INFORMATION.txt",
                   outtable,
                   fmt="%s",
                   delimiter=",")

        columns_in_order = [
            "HCOND1",
            "THICKM1",
            "ELEVUP",
            "WIDTH1",
            "DEPTH1",
            "THTS1",
            "THTI1",
            "EPS1",
            "UHC1",
        ]
        outcols = get_columns_in_order(segments, columns_in_order)
        outarray = np.array(outcols).transpose()
        outtable = np.vstack((columns_in_order, outarray))
        np.savetxt(out_segments + "_4B_UPSTREAM.txt",
                   outtable,
                   fmt="%s",
                   delimiter=",")

        columns_in_order = [
            "HCOND2",
            "THICKM2",
            "ELEVDN",
            "WIDTH2",
            "DEPTH2",
            "THTS2",
            "THTI2",
            "EPS2",
            "UHC2",
        ]
        outcols = get_columns_in_order(segments, columns_in_order)
        outarray = np.array(outcols).transpose()
        outtable = np.vstack((columns_in_order, outarray))
        np.savetxt(out_segments + "_4C_DOWNSTREAM.txt",
                   outtable,
                   fmt="%s",
                   delimiter=",")
    elif (len(segments) > 0) or (len(out_segments) > 0):
        gscript.fatal(_("You must include both input and output segments"))

    # Gravity reservoirs
    #####################
    if (len(gravity_reservoirs) > 0) and (len(out_gravity_reservoirs) > 0):
        columns_in_order = [
            "gvr_hru_id", "gvr_hru_pct", "gvr_cell_id", "gvr_cell_pct"
        ]
        outcols = get_columns_in_order(gravity_reservoirs, columns_in_order)
        outarray = np.array(outcols).transpose()
        outtable = np.vstack((columns_in_order, outarray))
        np.savetxt(out_gravity_reservoirs + ".txt",
                   outtable,
                   fmt="%s",
                   delimiter=",")
    elif (len(gravity_reservoirs) > 0) or (len(out_gravity_reservoirs) > 0):
        gscript.fatal(
            _("You must include both input and output gravity reservoirs"))

    # HRUs
    #######
    if (len(HRUs) > 0) and (len(out_HRUs) > 0):
        columns_in_order = [
            "hru_area",
            "hru_aspect",
            "hru_elev",
            "hru_lat",
            "hru_slope",
            "hru_segment",
            "hru_strmseg_down_id",
            "cov_type",
            "soil_type",
        ]
        outcols = get_columns_in_order(HRUs, columns_in_order)
        outarray = np.array(outcols).transpose()
        outtable = np.vstack((columns_in_order, outarray))
        np.savetxt(out_HRUs + ".txt", outtable, fmt="%s", delimiter=",")
    elif (len(HRUs) > 0) or (len(out_HRUs) > 0):
        gscript.fatal(_("You must include both input and output HRUs"))

    # Pour Point and Boundary Condition Cell (downstream from pour point)
    ######################################################################
    if len(out_pour_point_boundary) > 0:
        # Pour point
        if len(pour_point) > 0:
            _y, _x = np.squeeze(
                gscript.db_select(sql="SELECT row,col FROM " + pour_point))
            outstr = "discharge_pt: row_i " + _y + " col_i " + _x
            if len(bc_cell) > 0:
                outstr += "\n"
            outfile = open(out_pour_point_boundary + ".txt", "w")
            outfile.write(outstr)
        # Boundary condition
        if len(bc_cell) > 0:
            outfile = open(out_pour_point_boundary + ".txt", "a")
            _xys = np.squeeze(
                gscript.db_select(sql="SELECT row,col FROM " + bc_cell))
            # if only one point (so was on N-S, W-E direct connection),
            # expand dimensions so code below works
            if _xys.ndim < 2:
                _xys = np.expand_dims(_xys, axis=0)
            gscript.message(_xys)
            for _cell_coords in _xys:
                _y, _x = _cell_coords
                outstr = "boundary_condition_pt: row_i " + _y + " col_i " + _x
                if not (_cell_coords == _xys[-1]).all():
                    outstr += "\n"
                outfile.write(outstr)
            outfile.close()
    if (len(pour_point) == 0) and (len(bc_cell) == 0):
        gscript.fatal(_("You must include input and output pp's and/or bc's"))
Example #11
def main():
    vmap = options["map"]
    curr_mapset = grass.gisenv()["MAPSET"]
    mapset = grass.find_file(name=vmap, element="vector")["mapset"]

    # check if map exists in the current mapset
    if not mapset:
        grass.fatal(_("Vector map <%s> not found") % vmap)
    if mapset != curr_mapset:
        grass.fatal(_("Vector map <%s> not found in the current mapset") % vmap)

    # check for format
    vInfo = grass.vector_info(vmap)
    if vInfo["format"] != "PostGIS,PostgreSQL":
        grass.fatal(_("Vector map <%s> is not a PG-link") % vmap)

    # default connection
    global pg_conn
    pg_conn = {"driver": "pg", "database": vInfo["pg_dbname"]}

    # default topo schema
    if not options["topo_schema"]:
        options["topo_schema"] = "topo_%s" % options["map"]

    # check if topology schema already exists
    topo_found = False
    ret = grass.db_select(
        sql="SELECT COUNT(*) FROM topology.topology "
        "WHERE name = '%s'" % options["topo_schema"],
        **pg_conn
    )

    if not ret or int(ret[0][0]) == 1:
        topo_found = True

    if topo_found:
        if int(os.getenv("GRASS_OVERWRITE", "0")) == 1:
            # -> overwrite
            grass.warning(
                _("Topology schema <%s> already exists and will be overwritten")
                % options["topo_schema"]
            )
        else:
            grass.fatal(
                _("option <%s>: <%s> exists.") % ("topo_schema", options["topo_schema"])
            )

        # drop topo schema if exists
        execute(
            sql="SELECT topology.DropTopology('%s')" % options["topo_schema"],
            msg=_("Unable to remove topology schema"),
        )

    # create topo schema
    schema, table = vInfo["pg_table"].split(".")
    grass.message(_("Creating new topology schema..."))
    execute(
        "SELECT topology.createtopology('%s', find_srid('%s', '%s', '%s'), %s)"
        % (
            options["topo_schema"],
            schema,
            table,
            vInfo["geometry_column"],
            options["tolerance"],
        )
    )

    # add topo column to the feature table
    grass.message(_("Adding new topology column..."))
    execute(
        "SELECT topology.AddTopoGeometryColumn('%s', '%s', '%s', '%s', '%s')"
        % (
            options["topo_schema"],
            schema,
            table,
            options["topo_column"],
            vInfo["feature_type"],
        )
    )

    # build topology
    grass.message(_("Building PostGIS topology..."))
    execute(
        "UPDATE %s.%s SET %s = topology.toTopoGeom(%s, '%s', 1, %s)"
        % (
            schema,
            table,
            options["topo_column"],
            vInfo["geometry_column"],
            options["topo_schema"],
            options["tolerance"],
        ),
        useSelect=False,
    )

    # report summary
    execute("SELECT topology.TopologySummary('%s')" % options["topo_schema"])

    return 0
Example #12
def main():
    options, unused = grass.parser()

    mapName = options['input']
    trainingMapName = options['training']

    columnWithClass = options['class_column']

    useAllColumns = True
    if options['columns']:
        # columns as string
        columns = options['columns'].strip()
        useAllColumns = False

    # TODO: allow same input and output map only if --overwrite was specified
    # TODO: does adding a column count as overwriting, or is overwriting only updating an existing one?

    # variable names connected to training dataset have training prefix
    # variable names connected to classified dataset have no prefix

    # checking database connection (if map has a table)
    # TODO: layer
    checkDbConnection(trainingMapName)
    checkDbConnection(mapName)

    # loading descriptions first to check them

    trainingTableDescription = grass.db_describe(table=trainingMapName)

    if useAllColumns:
        trainingMinNcols = 3
        checkNcols(trainingMapName, trainingTableDescription, trainingMinNcols)
    else:
        pass

    checkNrows(trainingMapName, trainingTableDescription)

    if not hasColumn(trainingTableDescription, columnWithClass):
        fatal_noClassColumn(trainingMapName, columnWithClass)

    tableDescription = grass.db_describe(table=mapName)

    if useAllColumns:
        minNcols = 2
        checkNcols(mapName, tableDescription, minNcols)
    else:
        pass

    checkNrows(mapName, tableDescription)

    # TODO: check same (+-1) number of columns

    # loading data

    # TODO: make fun from this
    if useAllColumns:
        dbTable = grass.db_select(table=trainingMapName)
    else:
        # assuming that columns are concatenated by commas
        sql = 'SELECT %s,%s FROM %s' % (columnWithClass, columns, trainingMapName)
        dbTable = grass.db_select(sql=sql)

    trainingParameters = fromDbTableToSimpleTable(dbTable,
                                                  columnsDescription=trainingTableDescription['cols'],
                                                  columnWithClass=columnWithClass)

    if useAllColumns:
        trainingClasses = extractColumnWithClass(dbTable,
                                                 columnsDescription=trainingTableDescription['cols'],
                                                 columnWithClass=columnWithClass)
    else:
        # FIXME: magic num?
        trainingClasses = extractNthColumn(dbTable, 0)

    # TODO: hard coded 'cat'?
    if useAllColumns:
        dbTable = grass.db_select(table=mapName)
    else:
        # assuming that columns are concatenated by commas
        sql = 'SELECT %s,%s FROM %s' % ('cat', columns, mapName)
        dbTable = grass.db_select(sql=sql)

    parameters = fromDbTableToSimpleTable(dbTable,
                                          columnsDescription=tableDescription['cols'],
                                          columnWithClass=columnWithClass)
    if useAllColumns:
        cats = extractColumnWithCats(dbTable, columnsDescription=tableDescription['cols'])
    else:
        cats = extractNthColumn(dbTable, 0)

    # since dbTable can be big, it is better to avoid having it in memory twice
    del dbTable
    del trainingTableDescription

    classifier = Classifier()
    classifier.learn(trainingParameters, trainingClasses)
    classes = classifier.pred(parameters)

    # add the column only if it does not exist and the classification was successful
    if not hasColumn(tableDescription, columnWithClass):
        addColumn(mapName, columnWithClass, 'int')

    updateColumn(mapName, columnWithClass, cats, classes)