Example #1
def main():
    g.message("Pocitam NDVI...")

    # set the region
    g.region(rast=options['tm4'])

    # compute NDVI
    r.mapcalc('ndvi = float({y} - {x}) / ({y} + {x})'.format(x=options['tm3'],
                                                             y=options['tm4']),
              overwrite=True)

    # r.reclass only supports the CELL data type
    r.mapcalc('temp1 = 100 * ndvi', overwrite=True)
    g.message("Reklasifikuji data...")

    # reclassify the data
    reclass_rules = """-100 thru 5   = 1 bez vegetace, vodni plochy
5   thru 35  = 2 plochy s minimalni vegetaci
35  thru 100  = 3 plochy pokryte vegetaci"""
    r.reclass(overwrite=True,
              rules='-',
              input='temp1',
              output='r_ndvi',
              stdin_=reclass_rules)

    # set the color table
    color_rules = """1 red
2 yellow
3 0 136 26"""
    r.colors(quiet=True, map='r_ndvi', rules='-', stdin_=color_rules)

    # print a basic report of the data
    r.report(map='r_ndvi', units=['c', 'p', 'h'])
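
The script above reads its inputs from the options dictionary filled in by grass.script.parser() and calls modules through the pygrass shortcuts (the stdin_ keyword gives that away). A minimal sketch of the surrounding module boilerplate it is assumed to run with — the option keys tm3/tm4 come from the code, the descriptions and everything else are guesses:

#!/usr/bin/env python

#%module
#% description: Computes NDVI from Landsat TM bands 3 and 4 and reclassifies it
#%end
#%option G_OPT_R_INPUT
#% key: tm3
#% description: Red band (TM3)
#%end
#%option G_OPT_R_INPUT
#% key: tm4
#% description: Near-infrared band (TM4)
#%end

import grass.script as gscript
from grass.pygrass.modules.shortcuts import general as g
from grass.pygrass.modules.shortcuts import raster as r

if __name__ == "__main__":
    options, flags = gscript.parser()
    main()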
Example #2
def main():
    g.message("Pocitam NDVI...")

    # set the region
    g.region(rast=options['tm4'])
    
    # compute NDVI
    r.mapcalc('ndvi = float({y} - {x}) / ({y} + {x})'.format(x=options['tm3'], y=options['tm4']), overwrite=True)
    
    # r.reclass only supports the CELL data type
    r.mapcalc('temp1 = 100 * ndvi', overwrite=True)
    g.message("Reklasifikuji data...")
    
    # reclassify the data
    reclass_rules = """-100 thru 5   = 1 bez vegetace, vodni plochy
5   thru 35  = 2 plochy s minimalni vegetaci
35  thru 100  = 3 plochy pokryte vegetaci"""
    r.reclass(overwrite=True, rules='-',
              input='temp1', output='r_ndvi', stdin_=reclass_rules)
    
    # set the color table
    color_rules = """1 red
2 yellow
3 0 136 26"""
    r.colors(quiet=True,
             map='r_ndvi', rules='-', stdin_=color_rules)
    
    # print a basic report of the data
    r.report(map='r_ndvi', units=['c', 'p', 'h'])
Example #3
def recode_map(raster, rules, colors, output):
    """Scores a raster map based on a set of category recoding rules.

    This is a wrapper around r.recode

    Parameters
    ----------
    raster :
        Name of input raster map

    rules :
        Rules for r.recode

    colors :
        Color rules for r.colors

    output :
        Name of output raster map

    Returns
    -------
        Does not return any value

    Examples
    --------
    ...
    """
    msg = "Setting NULL cells in {name} map to 0"
    msg = msg.format(name=raster)
    grass.debug(_(msg))

    # ------------------------------------------
    r.null(map=raster, null=0)  # Set NULLs to 0
    msg = "To Do: confirm if setting the '{raster}' map's NULL cells to 0 is right"
    msg = msg.format(raster=raster)
    grass.debug(_(msg))
    # Is this right?
    # ------------------------------------------

    r.recode(input=raster, rules=rules, output=output)

    r.colors(map=output, rules="-", stdin=colors, quiet=True)

    grass.verbose(_("Scored map {name}:".format(name=raster)))
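
A hypothetical call to recode_map — the map name, rules file and colour rules below are purely illustrative; only the parameter semantics follow from the code above:

# hypothetical r.recode rules file, e.g. containing the line "0:255:0:100"
rules_file = "/tmp/ndvi_recode.rules"
score_colors = "0 red\n50 yellow\n100 green"

recode_map(raster="ndvi_raw",       # illustrative input raster name
           rules=rules_file,        # passed straight through to r.recode
           colors=score_colors,     # applied with r.colors via stdin
           output="ndvi_score")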
Example #4
def main():
    soillossbare = options['soillossbare']
    cpmax = options['cpmax']
    maxsoilloss = options['maxsoilloss']
    
    r.mapcalc(cpmax + "=" +  maxsoilloss + "/" + soillossbare)
    
    cpmaxrules = '\n '.join([
        "0.00  56:145:37",
        "0.01  128:190:91",
        "0.05  210:233:153",
        "0.1  250:203:147",
        "0.15  225:113:76",
        "0.2  186:20:20",
        "100  0:0:0"
    ])

    r.colors(map=cpmax, rules='-', stdin=cpmaxrules)

    gscript.info('Calculation of CPmax-scenario in <%s> finished.' % cpmax)
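
The r.mapcalc call above builds its expression by plain string concatenation; a behaviourally equivalent sketch using str.format, assuming the same option values, is easier to read and harder to get wrong:

expression = "{out} = {maxloss} / {bare}".format(out=cpmax,
                                                 maxloss=maxsoilloss,
                                                 bare=soillossbare)
r.mapcalc(expression)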
Example #5
def main():
    soillossbare = options['soillossbare']
    soillossgrow = options['soillossgrow']
    cfactor = options['cfactor']
    pfactor = options['pfactor']    
    map = options['map']
    factorcols = options['factorcols'].split(',')
    
    quiet = True
    if gscript.verbosity() > 2:
        quiet=False

    if not (cfactor or pfactor):
        if not map:
            gscript.fatal('Please give either factor raster map(s) or vector map with factor(s)')
        elif not factorcols:
            gscript.fatal("Please give 'factorcols' (attribute columns with factor(s))  for <%s>" %map)
        
        factors = ()
        for factorcol in factorcols:
            output = map.split('@')[0] + '.' + factorcol
            gscript.message('Rasterize <%s> with attribute <%s>' %(map, factorcol) 
                + '\n to raster map <%s> ...' %(output) )
            v.to_rast(input=map, use='attr', attrcolumn=factorcol, 
                      output=output, quiet=quiet)
            factors += (output,)
    
    else:
        factors = (cfactor, pfactor)
    
    gscript.message('Multiply factors <%s> with <%s> ...' %(factors, soillossbare) )
    formula = soillossgrow + '=' + soillossbare 
    for factor in factors:
        formula += '*' + factor
    r.mapcalc(formula)
    
    # apply color rules
    r.colors(map=soillossgrow,
             rules='-', stdin=colorrules['soillossgrow'],
             quiet=quiet)
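
The colorrules dictionary is assumed to be defined at module level; the entry used above presumably looks like the 'soillossgrow' rules listed in Example #10:

colorrules = {
    'soillossgrow': '\n '.join([
        "0   69:117:183",
        "1  115:146:185",
        "2  163:179:189",
        "4  208:216:193",
        "7.5  255:255:190",
        "10  252:202:146",
        "15  245:153:106",
        "20  233:100:70",
        "30  213:47:39",
        "300  213:47:39"
    ]),
}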
Example #6
def main():
    # user specified variables
    dem = options["elevation"]
    slope = options["slope"]
    aspect = options["aspect"]
    neighborhood_size = options["size"]
    output = options["output"]
    nprocs = int(options["nprocs"])
    exponent = float(options["exponent"])

    # check for valid neighborhood sizes
    neighborhood_size = neighborhood_size.split(",")
    neighborhood_size = [int(i) for i in neighborhood_size]

    if any(i % 2 == 0 for i in neighborhood_size):
        gs.fatal(
            "Invalid size - neighborhood sizes have to consist of odd numbers")

    if min(neighborhood_size) == 1:
        gs.fatal("Neighborhood sizes have to be > 1")

    # determine nprocs
    if nprocs < 0:
        n_cores = mp.cpu_count()
        nprocs = n_cores - (nprocs + 1)

    # temporary raster map names for slope, aspect, x, y, z components
    if slope == "":
        slope_raster = create_tempname("tmpSlope_")
    else:
        slope_raster = slope

    if aspect == "":
        aspect_raster = create_tempname("tmpAspect_")
    else:
        aspect_raster = aspect

    z_raster = create_tempname("tmpzRaster_")
    x_raster = create_tempname("tmpxRaster_")
    y_raster = create_tempname("tmpyRaster_")

    # create slope and aspect rasters
    if slope == "" or aspect == "":
        gs.message("Calculating slope and aspect...")
        gr.slope_aspect(
            elevation=dem,
            slope=slope_raster,
            aspect=aspect_raster,
            format="degrees",
            precision="FCELL",
            zscale=1.0,
            min_slope=0.0,
            quiet=True,
        )

    # calculate x y and z rasters
    # note - GRASS sin/cos functions differ from ArcGIS which expects input grid in radians
    # whereas GRASS functions expect degrees
    # no need to convert slope and aspect to radians as in the original ArcGIS script
    x_expr = "{x} = float( sin({a}) * sin({b}) )".format(x=x_raster,
                                                         a=aspect_raster,
                                                         b=slope_raster)

    y_expr = "{y} = float( cos({a}) * sin({b}) )".format(y=y_raster,
                                                         a=aspect_raster,
                                                         b=slope_raster)

    z_expr = "{z} = float( cos({a}) )".format(z=z_raster, a=slope_raster)

    # calculate x, y, z components (parallel)
    gs.message("Calculating x, y, and z rasters...")

    mapcalc = Module("r.mapcalc", run_=False)
    queue = ParallelModuleQueue(nprocs=nprocs)

    mapcalc1 = copy.deepcopy(mapcalc)
    m = mapcalc1(expression=x_expr)
    queue.put(m)

    mapcalc2 = copy.deepcopy(mapcalc)
    m = mapcalc2(expression=y_expr)
    queue.put(m)

    mapcalc3 = copy.deepcopy(mapcalc)
    m = mapcalc3(expression=z_expr)
    queue.put(m)

    queue.wait()

    # calculate x, y, z neighborhood sums (parallel)
    gs.message(
        "Calculating sums of x, y, and z rasters in selected neighborhoods...")

    x_sum_list = []
    y_sum_list = []
    z_sum_list = []

    neighbors = Module("r.neighbors", overwrite=True, run_=False)
    queue = ParallelModuleQueue(nprocs=nprocs)

    for size in neighborhood_size:
        # create temporary raster names for neighborhood x, y, z sums
        x_sum_raster = create_tempname("tmpxSumRaster_")
        x_sum_list.append(x_sum_raster)

        y_sum_raster = create_tempname("tmpySumRaster_")
        y_sum_list.append(y_sum_raster)

        z_sum_raster = create_tempname("tmpzSumRaster_")
        z_sum_list.append(z_sum_raster)

        # create weights
        mat = idw_weights(size, exponent)

        # queue jobs for x, y, z neighborhood sums
        neighbors_xsum = copy.deepcopy(neighbors)
        n = neighbors_xsum(
            input=x_raster,
            output=x_sum_raster,
            method="average",
            size=size,
            weight=mat,
        )
        queue.put(n)

        neighbors_ysum = copy.deepcopy(neighbors)
        n = neighbors_ysum(
            input=y_raster,
            output=y_sum_raster,
            method="average",
            size=size,
            weight=mat,
        )
        queue.put(n)

        neighbors_zsum = copy.deepcopy(neighbors)
        n = neighbors_zsum(
            input=z_raster,
            output=z_sum_raster,
            method="average",
            size=size,
            weight=mat,
        )
        queue.put(n)

    queue.wait()

    # calculate the resultant vector and final ruggedness raster
    # modified from the original script to multiply each SumRaster by the n neighborhood
    # cells to get the sum
    gs.message("Calculating the final ruggedness rasters...")

    mapcalc = Module("r.mapcalc", run_=False)
    queue = ParallelModuleQueue(nprocs=nprocs)
    vrm_list = []

    for x_sum_raster, y_sum_raster, z_sum_raster, size in zip(
            x_sum_list, y_sum_list, z_sum_list, neighborhood_size):

        if len(neighborhood_size) > 1:
            vrm_name = "_".join([output, str(size)])
        else:
            vrm_name = output

        vrm_list.append(vrm_name)

        vrm_expr = "{x} = float(1-( (sqrt(({a}*{d})^2 + ({b}*{d})^2 + ({c}*{d})^2) / {d})))".format(
            x=vrm_name,
            a=x_sum_raster,
            b=y_sum_raster,
            c=z_sum_raster,
            d=int(size) * int(size),
        )
        mapcalc1 = copy.deepcopy(mapcalc)
        m = mapcalc1(expression=vrm_expr)
        queue.put(m)

    queue.wait()

    # set colors
    gr.colors(flags="e", map=vrm_list, color="ryb")

    # set metadata
    for vrm, size in zip(vrm_list, neighborhood_size):
        title = "Vector Ruggedness Measure (size={size})".format(size=size)
        gr.support(map=vrm, title=title)

    return 0
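
The helpers create_tempname() and idw_weights() are not shown in this excerpt. A rough sketch of what they are assumed to do — the naming scheme and the weighting code are guesses; only the call signatures and the fact that r.neighbors weight= expects a text file follow from the code above:

import os
import tempfile
import uuid

import numpy as np


def create_tempname(prefix):
    # assumed: return a unique, valid temporary raster map name
    return prefix + uuid.uuid4().hex[:8]


def idw_weights(size, exponent):
    # assumed: write a size x size inverse-distance weight matrix to a text
    # file in the format accepted by r.neighbors weight= and return its path
    center = (size - 1) / 2.0
    y, x = np.mgrid[0:size, 0:size]
    dist = np.sqrt((x - center) ** 2 + (y - center) ** 2)
    dist[dist == 0] = 1.0  # avoid division by zero at the center cell
    weights = 1.0 / dist ** exponent
    fd, path = tempfile.mkstemp(suffix=".txt")
    with os.fdopen(fd, "w") as f:
        for row in weights:
            f.write(" ".join("{:.6f}".format(w) for w in row) + "\n")
    return path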
Example #7
File: test.py  Project: ruegdeg/r.learn.ml
from plotnine import *

(ggplot(df, aes(x="x", y="y", fill="value")) + geom_tile() + coord_fixed() +
 facet_wrap("variable") + theme_light() + theme(axis_title=element_blank()))

from sklearn.ensemble import RandomForestClassifier

clf = RandomForestClassifier(n_estimators=100)
clf.fit(X, y)

stack.predict(clf, output='test', overwrite=True, height=25)
stack.predict_proba(clf, output='test', overwrite=True, height=25)

test = RasterRow('test')
from grass.pygrass.modules.shortcuts import raster as r
r.colors('test', color='random')
test
test.close()

from sklearn.model_selection import cross_validate
cross_validate(clf, X, y, cv=3)

from grass.pygrass.gis.region import Region
from grass.pygrass.modules.grid.grid import GridModule
from grass.pygrass.modules.grid import split
from grass.pygrass.modules.shortcuts import general as g
from grass.pygrass.raster import RasterRow
import multiprocessing as mltp
from itertools import chain
import time
import numpy as np
Example #8
def compute_supply(
    base,
    recreation_spectrum,
    highest_spectrum,
    base_reclassification_rules,
    reclassified_base,
    reclassified_base_title,
    flow,
    flow_map_name,
    aggregation,
    ns_resolution,
    ew_resolution,
    print_only=False,
    flow_column_name=None,
    vector=None,
    supply_filename=None,
    use_filename=None,
):
    """
     Algorithmic description of the "Contribution of Ecosystem Types"

     # FIXME
     '''
     1   B ← {0, .., m-1}     :  Set of aggregational boundaries
     2   T ← {0, .., n-1}     :  Set of land cover types
     3   WE ← 0               :  Set of weighted extents
     4   R ← 0                :  Set of fractions
     5   F ← 0
     6   MASK ← HQR           : High Quality Recreation
     7   foreach {b} ⊆ B do   : for each aggregational boundary 'b'
     8      RB ← 0
     9      foreach {t} ⊆ T do  : for each Land Type
     10         WEt ← Et * Wt   : Weighted Extent = Extent(t) * Weight(t)
     11         WE ← WE⋃{WEt}   : Add to set of Weighted Extents
     12     S ← ∑t∈WEt
     13     foreach t ← T do
     14        Rt ← WEt / ∑WE
     15        R ← R⋃{Rt}
     16     RB ← RB⋃{R}
     '''
     # FIXME

    Parameters
    ----------
    recreation_spectrum:
        Map scoring access to and quality of recreation

    highest_spectrum :
        Expected is a map of areas with highest recreational value (category 9
        as per the report ... )

    base :
        Base land types map for final zonal statistics. Specific to
        ESTIMAP's recreation mapping algorithm

    base_reclassification_rules :
        Reclassification rules for the input base map

    reclassified_base :
        Name for the reclassified base cover map

    reclassified_base_title :
        Title for the reclassified base map

    ecosystem_types :

    flow :
        Map of visits, derived from the mobility function, depicting the
        number of people living inside zones 0, 1, 2, 3. Used as a cover map
        for zonal statistics.

    flow_map_name :
        A name for the 'flow' map. This is required when the 'flow' input
        option is not defined by the user, yet some of the requested outputs
        required first the production of the 'flow' map. An example is the
        request for a supply table without requesting the 'flow' map itself.

    aggregation :

    ns_resolution :

    ew_resolution :

    statistics_filename :

    supply_filename :
        Name for CSV output file of the supply table

    use_filename :
        Name for CSV output file of the use table

    flow_column_name :
        Name for column to populate with 'flow' values

    vector :
        If 'vector' is given, a vector map of the 'flow' along with appropriate
        attributes will be produced.

    ? :
        Land cover class percentages in ROS9 (this is: relative percentage)

    output :
        Supply table (distribution of flow for each land cover class)

    Returns
    -------
    This function produces the map on which the production of a supply table,
    in the form of a CSV file, is based.

    Examples
    --------
    """
    # Inputs
    flow_in_base = flow + "_" + base
    base_scores = base + ".scores"

    # Define lists and dictionaries to hold intermediate data
    statistics_dictionary = {}
    weighted_extents = {}
    flows = []

    # MASK areas of high quality recreation
    r.mask(raster=highest_spectrum, overwrite=True, quiet=True)

    # Reclassify land cover map to MAES ecosystem types
    r.reclass(
        input=base,
        rules=base_reclassification_rules,
        output=reclassified_base,
        quiet=True,
    )
    # add to "remove_at_exit" after the reclassified maps!

    # Discard areas out of MASK
    copy_equation = EQUATION.format(result=reclassified_base,
                                    expression=reclassified_base)
    r.mapcalc(copy_equation, overwrite=True)

    # Count flow within each land cover category
    r.stats_zonal(
        base=base,
        flags="r",
        cover=flow_map_name,
        method="sum",
        output=flow_in_base,
        overwrite=True,
        quiet=True,
    )

    # Set colors for "flow" map
    r.colors(map=flow_in_base, color=MOBILITY_COLORS, quiet=True)

    # Parse aggregation raster categories and labels
    categories = grass.parse_command("r.category",
                                     map=aggregation,
                                     delimiter="\t")

    for category in categories:

        # Intermediate names

        cells = highest_spectrum + ".cells" + "." + category
        remove_map_at_exit(cells)

        extent = highest_spectrum + ".extent" + "." + category
        remove_map_at_exit(extent)

        weighted = highest_spectrum + ".weighted" + "." + category
        remove_map_at_exit(weighted)

        fractions = base + ".fractions" + "." + category
        remove_map_at_exit(fractions)

        flow_category = "_flow_" + category
        flow = base + flow_category
        remove_map_at_exit(flow)

        flow_in_reclassified_base = reclassified_base + "_flow"
        flow_in_category = reclassified_base + flow_category
        flows.append(flow_in_category)  # add to list for patching
        remove_map_at_exit(flow_in_category)

        # Output names

        msg = "Processing aggregation raster category: {r}"
        msg = msg.format(r=category)
        grass.debug(_(msg))
        # g.message(_(msg))

        # First, set region to extent of the aggregation map
        # and resolution to the one of the population map
        # Note the `-a` flag to g.region: ?
        # To safely modify the region: grass.use_temp_region()  # FIXME
        g.region(
            raster=aggregation,
            nsres=ns_resolution,
            ewres=ew_resolution,
            flags="a",
            quiet=True,
        )

        msg = "|! Computational resolution matched to {raster}"
        msg = msg.format(raster=aggregation)
        grass.debug(_(msg))

        # Build MASK for current category & high quality recreation areas
        msg = "Setting category '{c}' of '{a}' as a MASK"
        grass.verbose(_(msg.format(c=category, a=aggregation)))

        masking = "if( {spectrum} == {highest_quality_category} && "
        masking += "{aggregation} == {category}, "
        masking += "1, null() )"
        masking = masking.format(
            spectrum=recreation_spectrum,
            highest_quality_category=HIGHEST_RECREATION_CATEGORY,
            aggregation=aggregation,
            category=category,
        )
        masking_equation = EQUATION.format(result="MASK", expression=masking)
        grass.mapcalc(masking_equation, overwrite=True)

        # zoom to MASK
        g.region(zoom="MASK",
                 nsres=ns_resolution,
                 ewres=ew_resolution,
                 quiet=True)

        # Count number of cells within each land category
        r.stats_zonal(
            flags="r",
            base=base,
            cover=highest_spectrum,
            method="count",
            output=cells,
            overwrite=True,
            quiet=True,
        )
        cells_categories = grass.parse_command("r.category",
                                               map=cells,
                                               delimiter="\t")
        grass.debug(_("Cells: {c}".format(c=cells_categories)))

        # Build cell category and label rules for `r.category`
        cells_rules = "\n".join([
            "{0}:{1}".format(key, value)
            for key, value in cells_categories.items()
        ])

        # Discard areas out of MASK
        copy_equation = EQUATION.format(result=cells, expression=cells)
        r.mapcalc(copy_equation, overwrite=True)

        # Reassign cell category labels
        r.category(map=cells, rules="-", stdin=cells_rules, separator=":")

        # Compute extent of each land category
        extent_expression = "@{cells} * area()"
        extent_expression = extent_expression.format(cells=cells)
        extent_equation = EQUATION.format(result=extent,
                                          expression=extent_expression)
        r.mapcalc(extent_equation, overwrite=True)

        # Write extent figures as labels
        r.stats_zonal(
            flags="r",
            base=base,
            cover=extent,
            method="average",
            output=extent,
            overwrite=True,
            verbose=False,
            quiet=True,
        )

        # Write land suitability scores as an ASCII file
        temporary_reclassified_base_map = temporary_filename(
            filename=reclassified_base)
        suitability_scores_as_labels = string_to_file(
            SUITABILITY_SCORES_LABELS,
            filename=temporary_reclassified_base_map)
        remove_files_at_exit(suitability_scores_as_labels)

        # Write scores as raster category labels
        r.reclass(
            input=base,
            output=base_scores,
            rules=suitability_scores_as_labels,
            overwrite=True,
            quiet=True,
            verbose=False,
        )
        remove_map_at_exit(base_scores)

        # Compute weighted extents
        weighted_expression = "@{extent} * float(@{scores})"
        weighted_expression = weighted_expression.format(extent=extent,
                                                         scores=base_scores)
        weighted_equation = EQUATION.format(result=weighted,
                                            expression=weighted_expression)
        r.mapcalc(weighted_equation, overwrite=True)

        # Write weighted extent figures as labels
        r.stats_zonal(
            flags="r",
            base=base,
            cover=weighted,
            method="average",
            output=weighted,
            overwrite=True,
            verbose=False,
            quiet=True,
        )

        # Get weighted extents in a dictionary
        weighted_extents = grass.parse_command("r.category",
                                               map=weighted,
                                               delimiter="\t")

        # Compute the sum of all weighted extents and add to dictionary
        category_sum = sum([
            float(x) if not math.isnan(float(x)) else 0
            for x in weighted_extents.values()
        ])
        weighted_extents["sum"] = category_sum

        # Create a map to hold fractions of each weighted extent to the sum
        # See also:
        # https://grasswiki.osgeo.org/wiki/LANDSAT#Hint:_Minimal_disk_space_copies
        r.reclass(
            input=base,
            output=fractions,
            rules="-",
            stdin="*=*",
            verbose=False,
            quiet=True,
        )

        # Compute weighted fractions of land types
        fraction_category_label = {
            key: float(value) / weighted_extents["sum"]
            for (key, value) in weighted_extents.items()
            if key != "sum"
        }

        # Build fraction category and label rules for `r.category`
        fraction_rules = "\n".join([
            "{0}:{1}".format(key, value)
            for key, value in fraction_category_label.items()
        ])

        # Set rules
        r.category(map=fractions,
                   rules="-",
                   stdin=fraction_rules,
                   separator=":")

        # Assert that sum of fractions is ~1
        fraction_categories = grass.parse_command("r.category",
                                                  map=fractions,
                                                  delimiter="\t")

        fractions_sum = sum([
            float(x) if not math.isnan(float(x)) else 0
            for x in fraction_categories.values()
        ])
        msg = "Fractions: {f}".format(f=fraction_categories)
        grass.debug(_(msg))

        # g.message(_("Sum: {:.17g}".format(fractions_sum)))
        assert abs(fractions_sum - 1) < 1.0e-6, "Sum of fractions is != 1"

        # Compute flow
        flow_expression = "@{fractions} * @{flow}"
        flow_expression = flow_expression.format(fractions=fractions,
                                                 flow=flow_in_base)
        flow_equation = EQUATION.format(result=flow,
                                        expression=flow_expression)
        r.mapcalc(flow_equation, overwrite=True)

        # Write flow figures as raster category labels
        r.stats_zonal(
            base=reclassified_base,
            flags="r",
            cover=flow,
            method="sum",
            output=flow_in_category,
            overwrite=True,
            verbose=False,
            quiet=True,
        )

        # Parse flow categories and labels
        flow_categories = grass.parse_command("r.category",
                                              map=flow_in_category,
                                              delimiter="\t")
        grass.debug(_("Flow: {c}".format(c=flow_categories)))

        # Build flow category and label rules for `r.category`
        flow_rules = "\n".join([
            "{0}:{1}".format(key, value)
            for key, value in flow_categories.items()
        ])

        # Discard areas out of MASK

        # Check here again!
        # Output patch of all flow maps?

        copy_equation = EQUATION.format(result=flow_in_category,
                                        expression=flow_in_category)
        r.mapcalc(copy_equation, overwrite=True)

        # Reassign cell category labels
        r.category(map=flow_in_category,
                   rules="-",
                   stdin=flow_rules,
                   separator=":")

        # Update title
        reclassified_base_title += " " + category
        r.support(flow_in_category, title=reclassified_base_title)

        # debugging
        # r.report(
        #     flags='hn',
        #     map=(flow_in_category),
        #     units=('k','c','p'),
        # )

        if print_only:
            r.stats(
                input=(flow_in_category),
                output="-",
                flags="nacpl",
                separator=COMMA,
                quiet=True,
            )

        if not print_only:

            if flow_column_name:
                flow_column_prefix = flow_column_name + category
            else:
                flow_column_name = "flow"
                flow_column_prefix = flow_column_name + category

            # Produce vector map(s)
            if vector:

                # The following is wrong

                # update_vector(vector=vector,
                #         raster=flow_in_category,
                #         methods=METHODS,
                #         column_prefix=flow_column_prefix)

                # What can be done?

                # Maybe update columns of an existing map from the columns of
                # the following vectorised raster map(s)
                # ?

                raster_to_vector(raster=flow_in_category,
                                 vector=flow_in_category,
                                 type="area")

            # get statistics
            dictionary = get_raster_statistics(
                map_one=aggregation,  # reclassified_base
                map_two=flow_in_category,
                separator="|",
                flags="nlcap",
            )

            # merge 'dictionary' with global 'statistics_dictionary'
            statistics_dictionary = merge_two_dictionaries(
                statistics_dictionary, dictionary)

        # It is important to remove the MASK!
        r.mask(flags="r", quiet=True)

    # FIXME

    # Add "reclassified_base" map to "remove_at_exit" here, so as to be after
    # all reclassified maps that derive from it

    # remove the map 'reclassified_base'
    # g.remove(flags='f', type='raster', name=reclassified_base, quiet=True)
    # remove_map_at_exit(reclassified_base)

    if not print_only:
        r.patch(flags="",
                input=flows,
                output=flow_in_reclassified_base,
                quiet=True)

        if vector:
            # Patch all flow vector maps in one
            v.patch(
                flags="e",
                input=flows,
                output=flow_in_reclassified_base,
                overwrite=True,
                quiet=True,
            )

        # export to csv
        if supply_filename:
            supply_filename += CSV_EXTENSION
            nested_dictionary_to_csv(supply_filename, statistics_dictionary)

        if use_filename:
            use_filename += CSV_EXTENSION
            uses = compile_use_table(statistics_dictionary)
            dictionary_to_csv(use_filename, uses)

    # Maybe return list of flow maps?  Requires unique flow map names
    return flows
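
compute_supply() leans on several module-level names defined elsewhere in the addon (EQUATION, temporary_filename, string_to_file, remove_map_at_exit, ...). A sketch of what two of them are assumed to look like, inferred only from how they are used above:

# assumed: template used to assemble r.mapcalc expressions throughout the module
EQUATION = "{result} = {expression}"


def string_to_file(string, filename):
    # assumed: dump 'string' into the file 'filename' and return its path,
    # so the result can be handed to rules= options and removed at exit
    with open(filename, "w") as f:
        f.write(string)
    return filename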
Example #9
def main():

    elevation = options['elevation']
    slope = options['slope']
    flat_thres = float(options['flat_thres'])
    curv_thres = float(options['curv_thres'])
    filter_size = int(options['filter_size'])
    counting_size = int(options['counting_size'])
    nclasses = int(options['classes'])
    texture = options['texture']
    convexity = options['convexity']
    concavity = options['concavity']
    features = options['features']

    # remove mapset from output name in case of overwriting existing map
    texture = texture.split('@')[0]
    convexity = convexity.split('@')[0]
    concavity = concavity.split('@')[0]
    features = features.split('@')[0]

    # store current region settings
    global current_reg
    current_reg = parse_key_val(g.region(flags='pg', stdout_=PIPE).outputs.stdout)
    del current_reg['projection']
    del current_reg['zone']
    del current_reg['cells']

    # check for existing mask and backup if found
    global mask_test
    mask_test = gs.list_grouped(
        type='rast', pattern='MASK')[gs.gisenv()['MAPSET']]
    if mask_test:
        global original_mask
        original_mask = temp_map('tmp_original_mask')
        g.copy(raster=['MASK', original_mask])

    # error checking
    if flat_thres < 0:
        gs.fatal('Parameter thres cannot be negative')

    if filter_size % 2 == 0 or counting_size % 2 == 0:
        gs.fatal(
            'Filter or counting windows require an odd-numbered window size')

    if filter_size >= counting_size:
        gs.fatal(
            'Filter size needs to be smaller than the counting window size')
    
    if features != '' and slope == '':
        gs.fatal('Need to supply a slope raster in order to produce the terrain classification')
                
    # Terrain Surface Texture -------------------------------------------------
    # smooth the dem
    gs.message("Calculating terrain surface texture...")
    gs.message(
        "1. Smoothing input DEM with a {n}x{n} median filter...".format(
            n=filter_size))
    filtered_dem = temp_map('tmp_filtered_dem')
    gs.run_command("r.neighbors", input = elevation, method = "median",
                    size = filter_size, output = filtered_dem, flags='c',
                    quiet=True)

    # extract the pits and peaks based on the threshold
    pitpeaks = temp_map('tmp_pitpeaks')
    gs.message("2. Extracting pits and peaks with difference > thres...")
    r.mapcalc(expression='{x} = if ( abs({dem}-{median})>{thres}, 1, 0)'.format(
                x=pitpeaks, dem=elevation, thres=flat_thres, median=filtered_dem),
                quiet=True)

    # calculate density of pits and peaks
    gs.message("3. Using resampling filter to create terrain texture...")
    window_radius = (counting_size-1)/2
    y_radius = float(current_reg['ewres'])*window_radius
    x_radius = float(current_reg['nsres'])*window_radius
    resample = temp_map('tmp_density')
    r.resamp_filter(input=pitpeaks, output=resample, filter=['bartlett','gauss'],
                    radius=[x_radius,y_radius], quiet=True)

    # convert to percentage
    gs.message("4. Converting to percentage...")
    r.mask(raster=elevation, overwrite=True, quiet=True)
    r.mapcalc(expression='{x} = float({y} * 100)'.format(x=texture, y=resample),
               quiet=True)
    r.mask(flags='r', quiet=True)
    r.colors(map=texture, color='haxby', quiet=True)

    # Terrain convexity/concavity ---------------------------------------------
    # surface curvature using laplacian filter
    gs.message("Calculating terrain convexity and concavity...")
    gs.message("1. Calculating terrain curvature using laplacian filter...")
    
    # grow the map to remove border effects and run laplacian filter
    dem_grown = temp_map('tmp_elevation_grown')
    laplacian = temp_map('tmp_laplacian')
    g.region(n=float(current_reg['n']) + (float(current_reg['nsres']) * filter_size),
             s=float(current_reg['s']) - (float(current_reg['nsres']) * filter_size),
             w=float(current_reg['w']) - (float(current_reg['ewres']) * filter_size),
             e=float(current_reg['e']) + (float(current_reg['ewres']) * filter_size))

    r.grow(input=elevation, output=dem_grown, radius=filter_size, quiet=True)
    r.mfilter(
        input=dem_grown, output=laplacian,
        filter=string_to_rules(laplacian_matrix(filter_size)), quiet=True)

    # extract convex and concave pixels
    gs.message("2. Extracting convexities and concavities...")
    convexities = temp_map('tmp_convexities')
    concavities = temp_map('tmp_concavities')

    r.mapcalc(
        expression='{x} = if({laplacian}>{thres}, 1, 0)'\
        .format(x=convexities, laplacian=laplacian, thres=curv_thres),
        quiet=True)
    r.mapcalc(
        expression='{x} = if({laplacian}<-{thres}, 1, 0)'\
        .format(x=concavities, laplacian=laplacian, thres=curv_thres),
        quiet=True)

    # calculate density of convexities and concavities
    gs.message("3. Using resampling filter to create surface convexity/concavity...")
    resample_convex = temp_map('tmp_convex')
    resample_concav = temp_map('tmp_concav')
    r.resamp_filter(input=convexities, output=resample_convex,
                    filter=['bartlett','gauss'], radius=[x_radius,y_radius],
                    quiet=True)
    r.resamp_filter(input=concavities, output=resample_concav,
                    filter=['bartlett','gauss'], radius=[x_radius,y_radius],
                    quiet=True)

    # convert to percentages
    gs.message("4. Converting to percentages...")
    g.region(**current_reg)
    r.mask(raster=elevation, overwrite=True, quiet=True)
    r.mapcalc(expression='{x} = float({y} * 100)'.format(x=convexity, y=resample_convex),
               quiet=True)
    r.mapcalc(expression='{x} = float({y} * 100)'.format(x=concavity, y=resample_concav),
               quiet=True)
    r.mask(flags='r', quiet=True)

    # set colors
    r.colors_stddev(map=convexity, quiet=True)
    r.colors_stddev(map=concavity, quiet=True)

    # Terrain classification Flowchart-----------------------------------------
    if features != '':
        gs.message("Performing terrain surface classification...")
        # level 1 produces classes 1 thru 8
        # level 2 produces classes 5 thru 12
        # level 3 produces classes 9 thru 16
        if nclasses == 8: levels = 1
        if nclasses == 12: levels = 2
        if nclasses == 16: levels = 3

        classif = []
        for level in range(levels):
            # mask previous classes x:x+4
            if level != 0:
                min_cla = (4*(level+1))-4
                clf_msk = temp_map('tmp_clf_mask')
                rules = '1:{0}:1'.format(min_cla)
                r.recode(
                    input=classif[level-1], output=clf_msk,
                    rules=string_to_rules(rules), overwrite=True)
                r.mask(raster=clf_msk, flags='i', quiet=True, overwrite=True)

            # image statistics
            smean = r.univar(
                map=slope, flags='g', stdout_=PIPE).outputs.stdout.split(os.linesep)
            smean = [i for i in smean if i.startswith('mean=')][0].split('=')[1]

            cmean = r.univar(
                map=convexity, flags='g', stdout_=PIPE).outputs.stdout.split(os.linesep)
            cmean = [i for i in cmean if i.startswith('mean=')][0].split('=')[1]

            tmean = r.univar(
                map=texture, flags='g', stdout_=PIPE).outputs.stdout.split(os.linesep)
            tmean = [i for i in tmean if i.startswith('mean=')][0].split('=')[1]
            classif.append(temp_map('tmp_classes'))
            
            if level != 0:
                r.mask(flags='r', quiet=True)

            classification(level+1, slope, smean, texture, tmean,
                            convexity, cmean, classif[level])

        # combine decision trees
        merged = []
        for level in range(0, levels):
            if level > 0:
                min_cla = (4*(level+1))-4
                merged.append(temp_map('tmp_merged'))
                r.mapcalc(
                    expression='{x} = if({a}>{min}, {b}, {a})'.format(
                        x=merged[level], min=min_cla, a=merged[level-1],  b=classif[level]))
            else:
                merged.append(classif[level])
        g.rename(raster=[merged[-1], features], quiet=True)
        del TMP_RAST[-1]

    # Write metadata ----------------------------------------------------------
    history = 'r.terrain.texture '
    for key, val in options.items():
        history += key + '=' + str(val) + ' '

    r.support(map=texture,
              title=texture,
              description='generated by r.terrain.texture',
              history=history)
    r.support(map=convexity,
              title=convexity,
              description='generated by r.terrain.texture',
              history=history)
    r.support(map=concavity,
              title=concavity,
              description='generated by r.terrain.texture',
              history=history)

    if features != '':
        r.support(map=features,
                  title=features,
                  description='generated by r.terrain.texture',
                  history=history)
        
        # write color and category rules to tempfiles                
        r.category(
            map=features,
            rules=string_to_rules(categories(nclasses)),
            separator='pipe')
        r.colors(
            map=features, rules=string_to_rules(colors(nclasses)), quiet=True)

    return 0
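
The helpers temp_map() and string_to_rules() are not part of this excerpt; a sketch of the assumed behaviour (the module-level TMP_RAST list is implied by the del TMP_RAST[-1] above, everything else is a guess):

import uuid

import grass.script as gs

TMP_RAST = []  # temporary raster names, removed by a cleanup routine elsewhere


def temp_map(prefix):
    # assumed: build a unique temporary raster name and register it for cleanup
    name = '{}_{}'.format(prefix, uuid.uuid4().hex[:8])
    TMP_RAST.append(name)
    return name


def string_to_rules(string):
    # assumed: write a rules string to a temporary text file and return its
    # path, as expected by the rules=/filter= options used in main()
    path = gs.tempfile()
    with open(path, 'w') as f:
        f.write(string)
    return path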
Example #10
def main():
    soilloss = options['soilloss']
    soilloss9 = soilloss.split('@')[0] + '.9'
    soilloss3 = soilloss.split('@')[0] + '.3'
    colorschema = options['colorschema']
    flag_u = flags['u']
    flag_f = flags['f']

    
    quiet = True
    if gscript.verbosity() > 2:
        quiet=False
    
    # color schemes - contents:
    classrules = {  'soillossbare9' : {},
                    'soillossbare3' : {},
                    'soillossgrow9' : {},
                    'cfactor6' : {},
                    'kfactor6' : {}                
                    }
    
    colorrules = {  'soillossbare9': {},
                    'soillossbare3': {},
                    'soillossbare' : {},
                    'soillossgrow9' : {},
                    'soillossgrow' : {},
                    'cfactor6' : {},
                    'cfactor' : {},
                    'kfactor6' : {},
                    'kfactor' : {}
                    }
                    
    # (c) Gisler 2010                       
    classrules['soillossbare9'] =  '\n '.join([
        "0 thru 20 = 1 < 20",
        "20 thru 30 = 2 20 - 30",
        "30 thru 40 = 3 30 - 40",
        "40 thru 55 = 4 40 - 55",
        "55 thru 100 = 5 55 - 100",
        "100 thru 150 = 6 100 - 150",
        "150 thru 250 = 7 150 - 250",
        "250 thru 500 = 8 250 - 500",
        "500 thru 50000 = 9 > 500",
        ])
    
    # (c) Gisler 2010       
    classrules['soillossbare3'] =  '\n '.join([
        "0 thru 30 = 1 keine Gefährdung",
        "30 thru 55 = 2 Gefährdung",
        "55 thru 50000 = 3  grosse Gefährdung",
        ])
    
    # (c) BLW 2011
    colorrules['soillossbare9'] = '\n '.join([
        "1    0:102:0",
        "2   51:153:0",
        "3   204:255:0",
        "4  255:255:0",
        "5   255:102:0",
        "6  255:0:0",
        "7  204:0:0",
        "8  153:0:0",
        "9  102:0:0",
        ])
    
    # (c) BLW 2011
    colorrules['soillossbare3'] = '\n '.join([
        "1   51:153:0",
        "2  255:255:0",
        "3  255:0:0"
        ])
    
    # (c) BLW 2011
    colorrules['soillossbare'] = '\n '.join([
        "0    0:102:0",
        "20   51:153:0",
        "30   204:255:0",
        "40  255:255:0",
        "55   255:102:0",
        "100  255:0:0",
        "150  204:0:0",
        "250  153:0:0",
        "500  102:0:0",
        "5000  102:0:0"
        ])
    
    # (c) Gisler 2010       
    colorrules['soillossgrow9'] = '\n '.join([
        "1   69:117:183",
        "2  115:146:185",
        "3  163:179:189",
        "4  208:216:193",
        "5  255:255:190",
        "6  252:202:146",
        "7  245:153:106",
        "8  233:100:70",
        "9  213:47:39"
        ])
        
    
    # (c) Gisler 2010    
    colorrules['soillossgrow3'] = '\n '.join([
        "1   69:117:183",
        "2  163:179:189",
        "3  208:216:193",
        "4  245:153:106"
        ])
     
    # (c) Gisler 2010   
    colorrules['soillossgrow'] = '\n '.join([
        "0   69:117:183",
        "1  115:146:185",
        "2  163:179:189",
        "4  208:216:193",
        "7.5  255:255:190",
        "10  252:202:146",
        "15  245:153:106",
        "20  233:100:70",
        "30  213:47:39",
        "300  213:47:39"
        ])
        
    # (c) Gisler 2010   
    classrules['soillossgrow9'] = '\n '.join([
        "0 thru 1 = 1 < 1 ",
        "1 thru 2 = 2 1 - 2 ",
        "2 thru 4 = 3 2 - 4 ",
        "4 thru 7.5 = 4 4 - 7.5",
        "7.5 thru 10 = 5 7.5 - 10",
        "10 thru 15 = 6 10 - 15",
        "15 thru 20 = 7 15 - 20",
        "20 thru 30 = 8  20 - 30",
        "30 thru 5000 = 9  > 30"
        ])
    
    # (c) Gisler 2010   
    classrules['soillossgrow3'] = '\n '.join([
        "0 thru 2 = 1 Toleranz mittelgründige Böden",
        "2 thru 4 = 2 Toleranz tiefgründige Böden",
        "4 thru 7.5 = 3 leichte Überschreitung",
        "7.5 thru 5000 = 4 starke Überschreitung"
        ])
       
    # Gisler 2010
    colorrules['cfactor6']  = '\n '.join([
        "1  56:145:37",
        "2  128:190:91",
        "3  210:233:153",
        "4  250:203:147",
        "5  225:113:76",
        "6  186:20:20"
        ])
        
    # Gisler 2010
    colorrules['cfactor']  = '\n '.join([
        "0.00  56:145:37",
        "0.01  128:190:91",
        "0.05  210:233:153",
        "0.1  250:203:147",
        "0.15  225:113:76",
        "0.2  186:20:20",
        "1  186:20:20",
        ])
        
    # (c) Gisler 2010
    classrules['kfactor5'] = '\n '.join([
        "0 thru 0.20 = 1 < 0.20",
        "0.20 thru 0.25 = 2 0.20 - 0.25",
        "0.25 thru 0.30 = 3 0.25 - 0.3",
        "0.3  thru 0.35 = 4 0.3 - 0.35",
        "0.35 thru 1 = 5 > 0.30"
        ])
    
    # (c) Gisler 2010
    colorrules['kfactor6'] = '\n '.join([
        "1  15:70:15",
        "2  98:131:52",
        "3  204:204:104",
        "4  151:101:50",
        "5  98:21:15"
        ])
        
    # (c) Gisler 2010
    colorrules['kfactor'] = '\n '.join([
        "0.00  15:70:15",
        "0.20  98:131:52",
        "0.25  204:204:104",
        "0.30  151:101:50",
        "0.35  98:21:15"
        ])
        
    # own definitions
    colorrules['cpmax'] = '\n '.join([
        "0.01  102:0:0",
        "0.01  153:0:0",
        "0.02  204:0:0",
        "0.04  255:0:0",
        "0.06   255:102:0",
        "0.08   255:255:0",
        "0.10  204:255:0",
        "0.12   51:153:0",
        "0.15    0:102:0",
        "1000.00    0:102:0"
        ])
            
    classrules9 =  '\n '.join([
        "0 thru 20 = 1 <20",
        "20 thru 30 = 2 20 - 30",
        "30 thru 40 = 3 30 - 40",
        "40 thru 55 = 4 40 - 55",
        "55 thru 100 = 5 55 - 100",
        "100 thru 150 = 6 100 - 150",
        "150 thru 250 = 7 150 - 250",
        "250 thru 500 = 8 250 - 500",
        "500 thru 50000 = 9 >500",
        ])
    
    if colorschema == 'soillossbare':
        classrules9 = classrules['soillossbare9']
        colorrules9 = colorrules['soillossbare9']
        classrules3 = classrules['soillossbare3']
        colorrules3 = colorrules['soillossbare3']
        colorrules = colorrules['soillossbare']

    if colorschema == 'soillossgrow':
        classrules9 = classrules['soillossgrow9']
        colorrules9 = colorrules['soillossgrow9']
        classrules3 = classrules['soillossgrow3']
        colorrules3 = colorrules['soillossgrow3']
        colorrules = colorrules['soillossgrow']
        
    r.reclass(input=soilloss, rules='-', stdin=classrules9, output=soilloss9)
    r.colors(map=soilloss9, rules='-', stdin=colorrules9, quiet=quiet)
    r.reclass(input=soilloss, rules='-', stdin=classrules3, output=soilloss3)
    r.colors(map=soilloss3, rules='-', stdin=colorrules3, quiet=quiet)

    if flag_f:
        soilloss3f = soilloss3 + 'f'
        r.neighbors(method='mode', input=soilloss3, selection=soilloss3,
                    output=soilloss3f, size=7)
        soilloss3 = soilloss3f


    if flag_u:
        r.colors(map=soilloss, rules='-', stdin=colorrules, quiet=quiet)
Example #11
def main():
    # options and flags
    options, flags = gs.parser()
    input_raster = options["input"]
    minradius = int(options["minradius"])
    maxradius = int(options["maxradius"])
    steps = int(options["steps"])
    output_raster = options["output"]

    region = Region()
    res = np.mean([region.nsres, region.ewres])

    # some checks
    if "@" in output_raster:
        output_raster = output_raster.split("@")[0]

    if maxradius <= minradius:
        gs.fatal("maxradius must be greater than minradius")

    if steps < 2:
        gs.fatal("steps must be greater than 1")

    # calculate radi for generalization
    radi = np.logspace(np.log(minradius),
                       np.log(maxradius),
                       steps,
                       base=np.exp(1),
                       dtype=int)
    radi = np.unique(radi)
    sizes = radi * 2 + 1

    # multiscale calculation
    ztpi_maps = list()

    for step, (radius, size) in enumerate(zip(radi[::-1], sizes[::-1])):
        gs.message(
            "Calculating the TPI at radius {radius}".format(radius=radius))

        # generalize the dem
        step_res = res * size
        step_res_pretty = str(step_res).replace(".", "_")
        generalized_dem = gs.tempname(4)

        if size > 15:
            step_dem = gs.tempname(4)
            gg.region(res=str(step_res))
            gr.resamp_stats(
                input=input_raster,
                output=step_dem,
                method="average",
                flags="w",
            )
            gr.resamp_rst(
                input=step_dem,
                ew_res=res,
                ns_res=res,
                elevation=generalized_dem,
                quiet=True,
            )
            region.write()
            gg.remove(type="raster", name=step_dem, flags="f", quiet=True)
        else:
            gr.neighbors(input=input_raster, output=generalized_dem, size=size)

        # calculate the tpi
        tpi = gs.tempname(4)
        gr.mapcalc(expression="{x} = {a} - {b}".format(
            x=tpi, a=input_raster, b=generalized_dem))
        gg.remove(type="raster", name=generalized_dem, flags="f", quiet=True)

        # standardize the tpi
        raster_stats = gr.univar(map=tpi, flags="g",
                                 stdout_=PIPE).outputs.stdout
        raster_stats = parse_key_val(raster_stats)
        tpi_mean = float(raster_stats["mean"])
        tpi_std = float(raster_stats["stddev"])
        ztpi = gs.tempname(4)
        ztpi_maps.append(ztpi)
        RAST_REMOVE.append(ztpi)

        gr.mapcalc(expression="{x} = ({a} - {mean})/{std}".format(
            x=ztpi, a=tpi, mean=tpi_mean, std=tpi_std))
        gg.remove(type="raster", name=tpi, flags="f", quiet=True)

        # integrate
        if step > 0:
            tpi_updated2 = gs.tempname(4)
            gr.mapcalc("{x} = if(abs({a}) > abs({b}), {a}, {b})".format(
                a=ztpi_maps[step], b=tpi_updated1, x=tpi_updated2))
            RAST_REMOVE.append(tpi_updated2)
            tpi_updated1 = tpi_updated2
        else:
            tpi_updated1 = ztpi_maps[0]

    RAST_REMOVE.pop()
    gg.rename(raster=(tpi_updated1, output_raster), quiet=True)

    # set color theme
    with RasterRow(output_raster) as src:
        color_rules = """{minv} blue
            -1 0:34:198
            0 255:255:255
            1 255:0:0
            {maxv} 110:15:0
            """
        color_rules = color_rules.format(minv=src.info.min, maxv=src.info.max)
        gr.colors(map=output_raster, rules="-", stdin_=color_rules, quiet=True)
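
RAST_REMOVE is assumed to be a module-level list of intermediate rasters that a cleanup routine registered elsewhere deletes when the script exits; a sketch of that assumed pattern:

import atexit

from grass.pygrass.modules.shortcuts import general as gg

RAST_REMOVE = []


def cleanup():
    # assumed: drop every intermediate raster collected while main() ran
    for name in RAST_REMOVE:
        gg.remove(type="raster", name=name, flags="f", quiet=True)


atexit.register(cleanup)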
Example #12
      demFull = np.flipud(dem)
      dem = dem[margin_bottom:margin_top, margin_left:margin_right]
      dem = np.flipud(dem)
      # DEM import into GRASS GIS
      #try:
      DEMarray = garray.array()
      DEMarray[...] = dem
      DEMarray.write('tmp', overwrite=True)
      # Compute map of null areas
      r.mapcalc(scanNameNULL+' = isnull(tmp)', overwrite=True)
      # DEM null filling
      try:
        r.fillnulls(input='tmp', output=scanNameDEM, method='bilinear', overwrite=False)
      except:
        pass
      r.colors(map=scanNameDEM, color='wave')
      # Shaded relief map
      try:
        r.relief(input=scanNameDEM, output=scanNameShaded, overwrite=False)
      except:
        pass
    else:
      print "Processing already complete for", scanName
    #  errorfiles.append(DATfile)


# EXPORT STEP -- DO IT LATER
"""
for sourcedir in sourcedirs:
  DATpaths = sorted(glob.glob(sourcedir+'*.DAT'))
  for DATpath in DATpaths:
Example #13
def export_map(input_name, title, categories, colors, output_name, timestamp):
    """
    Export a raster map by renaming the (temporary) raster map name
    'input_name' to the requested output raster map name 'output_name'.
    This function is (mainly) used to export either of the intermediate
    recreation 'potential' or 'opportunity' maps.

    Parameters
    ----------
    input_name :
        Input raster map name

    title :
        Title for the output raster map

    categories :
        Categories and labels for the output raster map

    colors :
        Colors for the output raster map

    output_name :
        Output raster map name

    timestamp :
        Timestamp for the output raster map (passed on to update_meta)

    Returns
    -------
    output_name :
        This function will return the requested 'output_name'

    Examples
    --------
    ..
    """
    finding = grass.find_file(name=input_name, element="cell")
    if not finding["file"]:
        grass.fatal("Raster map {name} not found".format(
            name=input_name))  # Maybe use 'finding'?

    # inform
    msg = "* Outputting '{raster}' map\n"
    msg = msg.format(raster=output_name)
    grass.verbose(_(msg))

    # get categories and labels
    temporary_raster_categories_map = temporary_filename("categories_of_" +
                                                         input_name)
    raster_category_labels = string_to_file(
        string=categories, filename=temporary_raster_categories_map)

    # add ascii file to removal list
    remove_files_at_exit(raster_category_labels)

    # apply categories and description
    r.category(map=input_name, rules=raster_category_labels, separator=":")

    # update meta and colors
    update_meta(input_name, title, timestamp)
    r.colors(map=input_name, rules="-", stdin=colors, quiet=True)

    # rename to requested output name
    g.rename(raster=(input_name, output_name), quiet=True)

    return output_name
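
A hypothetical call to export_map — the map names, category rules, colour rules and timestamp are illustrative only; the parameter semantics follow from the code above:

potential = export_map(
    input_name="tmp_recreation_potential",    # temporary map produced earlier
    title="Recreation potential",
    categories="1:Low\n2:Moderate\n3:High",   # r.category rules, ':' separated
    colors="1 red\n2 yellow\n3 green",        # r.colors rules fed via stdin
    output_name="recreation_potential",
    timestamp="1 Jan 2020",
)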
Example #14
def main():
    soillossin = options['soillossin']
    soillossout = options['soillossout']
    factorold = options['factorold']
    
    factornew = options['factornew']
    map = options['map']
    factorcol = options['factorcol']
    
    flag_p = flags['p'] # patch factornew with factorold
    flag_k = flags['k'] # calculate k-factor components from % clay p_T, silt p_U, stones p_st, humus p_H 

     
    if not factornew:
        factors = {}
        if flag_k:
            gscript.message('Using factor derived from \
                soil components.')
            parcelmap = Vect(map)
            parcelmap.open(mode='rw', layer=1)
            parcelmap.table.filters.select()
            cur = parcelmap.table.execute()
            col_names = [cn[0] for cn in cur.description]
            rows = cur.fetchall()
           
            for col in (u'Kb',u'Ks',u'Kh', u'K'):
                if col not in parcelmap.table.columns:
                    parcelmap.table.columns.add(col,u'DOUBLE')
           
            for row in rows:
                rowid = row[1]
                p_T = row[7]
                p_U = row[8]
                p_st = row[9]
                p_H = row[10]
    
                print("Parzelle mit id %d :" %rowid)
                for sublist in bodenarten:
                    # p_T and p_U
                    if p_T in range(sublist[2],sublist[3]) \
                        and p_U in range(sublist[4],sublist[5]) :
                        print('Bodenart "' + sublist[1] 
                            + '", Kb = ' + str(sublist[6]))
                        Kb = sublist[6]
                        break
                
                for sublist in skelettgehalte:
                    if p_st < sublist[0]:
                        print('Skelettgehaltsklasse bis ' + str(sublist[0]) 
                            + ' , Ks = ' + str(sublist[1]))
                        Ks = sublist[1]
                        break
            
                   
                for sublist in humusgehalte:
                    if p_H < sublist[0]:
                        print('Humusgehaltsklasse bis ' + str(sublist[0]) 
                            + ' , Ks = ' + str(sublist[1]))
                        Kh = sublist[1]
                        break
                
                
                K = Kb * Ks * Kh
                print('K = ' + str(K))
        
                if K > 0:
                    parcelmap.table.execute("UPDATE " +  parcelmap.name 
                        + " SET"
                        + " Kb=" + str(Kb)
                        + ", Ks=" + str(Ks)
                        + ", Kh=" + str(Kh)
                        + ", K=" + str(K)
                        + " WHERE id=" + str(rowid) )
                    parcelmap.table.conn.commit()
                
            parcelmap.close()
            factorcol2 = 'K'
            
            factors['k'] = map.split('@')[0]+'.tmp.'+factorcol2
            v.to_rast(input=map, use='attr',
                   attrcolumn=factorcol2,
                   output=factors['k'])
            r.null(map=factors['k'], setnull='0')

        
        if factorcol:
            gscript.message('Using factor from column %s of \
                    vector map <%s>.' % (factorcol, map) )
                    
            factors['factorcol'] = map.split('@')[0]+'.tmp.' + factorcol
            v.to_rast(input=map, use='attr',
                   attrcolumn=factorcol,
                   output=factors['factorcol'])
            r.null(map=factors['factorcol'], setnull='0')
        
    print(factors.keys())
    if 'k' not in factors and 'factorcol' not in factors:
            gscript.fatal('Please provide either factor \
                raster map or valid vector map with factor column \
                (kfactor) or factor components columns (Kb, Ks, Kh)' )
        
        #if 'k' in factors and 'factorcol' in factors: 
    
        factornew = map.split('@')[0]+'.kfactor'
        if 'k' in factors and 'factorcol' in  factors:
            factornew = map.split('@')[0]+'.kfactor'
            r.patch(input=(factors['factorcol'],factors['k']),
                    output=factornew)
            
        elif 'k' in factors:
            g.copy(rast=(factors['k'],factornew))
            
        elif 'factorcol' in factors:
            g.copy(rast=(factors['factorcol'],factornew))

            
    if flag_p:
        #factorcorr = factorold + '.update'
        r.patch(input=(factornew,factorold), output=factornew)
        
    formula = soillossout + '=' + soillossin \
                + '/' + factorold  \
                + '*' + factornew
    r.mapcalc(formula)
            
    r.colors(map=soillossout, raster=soillossin)
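
The lookup tables bodenarten, skelettgehalte and humusgehalte are module-level data not shown here. From the indexing in main() they are assumed to have roughly the following shape — the numeric values below are purely illustrative, not the calibrated ones:

# bodenarten: [id, name, clay_min, clay_max, silt_min, silt_max, Kb]
bodenarten = [
    [1, "Sand",  0, 10,  0, 30, 0.10],   # illustrative values only
    [2, "Lehm", 10, 30, 30, 60, 0.30],
]

# skelettgehalte / humusgehalte: [upper class limit in %, correction factor]
skelettgehalte = [[10, 1.0], [25, 0.8], [100, 0.6]]   # -> Ks
humusgehalte = [[2, 1.0], [4, 0.9], [100, 0.8]]       # -> Kh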
Example #15
            # DEM import into GRASS GIS
            #try:
            DEMarray = garray.array()
            DEMarray[...] = dem
            DEMarray.write('tmp', overwrite=True)
            # Compute map of null areas
            r.mapcalc(scanNameNULL + ' = isnull(tmp)', overwrite=True)
            # DEM null filling
            try:
                r.fillnulls(input='tmp',
                            output=scanNameDEM,
                            method='bilinear',
                            overwrite=False)
            except:
                pass
            r.colors(map=scanNameDEM, color='wave')
            # Shaded relief map
            try:
                r.relief(input=scanNameDEM,
                         output=scanNameShaded,
                         overwrite=False)
            except:
                pass
        else:
            print "Processing already complete for", scanName
        #  errorfiles.append(DATfile)

# EXPORT STEP -- DO IT LATER
"""
for sourcedir in sourcedirs:
  DATpaths = sorted(glob.glob(sourcedir+'*.DAT'))