Example No. 1
def classification(level, slope, smean, texture, tmean, convexity,
                   cmean, classif):
    # Classification scheme according to Iwahashi and Pike (2007)
    # Simple decision tree that classifies terrain features
    # (slope, texture, convexity) relative to central tendency of features
    #
    # Args:
    #   level: Nested classification level
    #   slope: String, name of slope raster
    #   smean: Float, mean of slope raster for the remaining level partition
    #   texture: String, name of terrain texture raster
    #   tmean: Float, mean of texture raster for remaining level partition
    #   convexity: String, name of convexity raster
    #   cmean: Float, mean of convexity raster for remaining level partition
    #   classif: String, name of map to store classification

    incr = (4*level)-4
    expr = '{x} = if({s}>{smean}, if({c}>{cmean}, if({t}<{tmean}, {i}+1, {i}+2), if({t}<{tmean}, {i}+3, {i}+4)), if({c}>{cmean}, if({t}<{tmean}, {i}+5, {i}+6), if({t}<{tmean}, {i}+7, {i}+8)))'.format(
            x=classif, i=incr,
            s=slope, smean=smean,
            t=texture, tmean=tmean,
            c=convexity, cmean=cmean)
    r.mapcalc(expression=expr)

    return 0
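A hypothetical call of the function above (map names and mean values are assumptions, not taken from the original module): classify the whole region at nesting level 1 against the global means of the three input rasters. With level=1 the eight Iwahashi and Pike classes 1-8 are written; each further nesting level shifts the class numbers up by four.

# Hypothetical usage sketch; map names and mean values are assumptions.
classification(level=1,
               slope='slope', smean=12.4,
               texture='texture', tmean=0.51,
               convexity='convexity', cmean=0.47,
               classif='terrain_classes')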
Example No. 2
def main():
    g.message("Pocitam NDVI...")

    # nastavit region
    g.region(rast=options['tm4'])
    
    # vypocitat NDVI
    r.mapcalc('ndvi = float({y} - {x}) / ({y} + {x})'.format(x=options['tm3'], y=options['tm4']), overwrite = True)
    
    # r.reclass podporuje pouze datovy typ CELL
    r.mapcalc('temp1 = 100 * ndvi', overwrite = True)
    g.message("Reklasifikuji data...")
    
    # reklasifikovat data
    reclass_rules = """-100 thru 5   = 1 bez vegetace, vodni plochy
5   thru 35  = 2 plochy s minimalni vegetaci
35  thru 100  = 3 plochy pokryte vegetaci"""
    r.reclass(overwrite = True, rules = '-',
              input = 'temp1', output = 'r_ndvi', stdin_ = reclass_rules)
    
# nastavit tabulku barev
    color_rules = """1 red
2 yellow
3 0 136 26"""
    r.colors(quiet = True,
             map = 'r_ndvi', rules = '-', stdin_ = color_rules)
    
    # vytiskout zakladni charakteristiku dat 
    r.report(map = 'r_ndvi', units = ['c', 'p', 'h'])
Example No. 3
def main():
    g.message("Pocitam NDVI...")

    # nastavit region
    g.region(rast=options['tm4'])

    # vypocitat NDVI
    r.mapcalc('ndvi = float({y} - {x}) / ({y} + {x})'.format(x=options['tm3'],
                                                             y=options['tm4']),
              overwrite=True)

    # r.reclass podporuje pouze datovy typ CELL
    r.mapcalc('temp1 = 100 * ndvi', overwrite=True)
    g.message("Reklasifikuji data...")

    # reklasifikovat data
    reclass_rules = """-100 thru 5   = 1 bez vegetace, vodni plochy
5   thru 35  = 2 plochy s minimalni vegetaci
35  thru 100  = 3 plochy pokryte vegetaci"""
    r.reclass(overwrite=True,
              rules='-',
              input='temp1',
              output='r_ndvi',
              stdin_=reclass_rules)

    # nastavit tabulku barev
    color_rules = """1 red
2 yellow
3 0 136 26"""
    r.colors(quiet=True, map='r_ndvi', rules='-', stdin_=color_rules)

    # vytiskout zakladni charakteristiku dat
    r.report(map='r_ndvi', units=['c', 'p', 'h'])
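Both NDVI examples assume a typical GRASS script preamble that defines the tm3/tm4 options and imports the pygrass shortcuts; a minimal sketch of such a preamble (an assumption, since the original scripts' headers are not part of this excerpt):

# Assumed preamble for the two NDVI examples above (a sketch; the original
# scripts' headers are not shown in this excerpt).
# %option G_OPT_R_INPUT
# % key: tm3
# %end
# %option G_OPT_R_INPUT
# % key: tm4
# %end
import grass.script as gscript
from grass.pygrass.modules.shortcuts import general as g, raster as r

if __name__ == "__main__":
    options, flags = gscript.parser()
    main()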
Example No. 4
def upper_value(upper, stu, lan, rot, age, irate, overwrite=False):
    """Compute the upper value of a land parcel."""
    expr = ("{upper} = ({stu} + {lan}) / ((1 + {irate})^({rot} - {age}) )"
            " - {lan}")
    r.mapcalc(expr.format(upper=upper, stu=stu, lan=lan, irate=irate,
                          rot=rot, age=age), overwrite=overwrite)
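The expression discounts the stumpage plus land value from the rotation age back to the current stand age at interest rate irate, then subtracts the land value again. A scalar sanity check of the same formula, with all numbers assumed:

# Scalar check of the same formula (all numbers are assumptions):
stu, lan, rot, age, irate = 5000.0, 1000.0, 80, 30, 0.03
upper = (stu + lan) / (1 + irate) ** (rot - age) - lan
print(round(upper, 2))  # 368.64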
Example No. 5
def revenues(opts, yield_surface, m1t1, m1t2, m1, m2, forest, yield_,
             technical_bioenergy):
    # Calculate revenues
    pid = os.getpid()
    # FIXME: tmp_yield corresponds to the raster called "yield" in the other sections of the module
    tmp_yield = 'tmprgreen_%i_yield' % pid
    tmp_wood = 'tmprgreen_%i_wood_price' % pid
    tmp_rev_wood = 'tmprgreen_%i_rev_wood' % pid

    exprpix = '%s=%s*%s/%s*(ewres()*nsres()/10000)' % (
        tmp_rev_wood, tmp_wood, tmp_yield, yield_surface)
    run_command("r.mapcalc", overwrite=True, expression=exprpix)
    # FIXME: Does the coppice produce timber?
    tr1 = ("{total_revenues} ="
           "{technical_surface}*(({m1t1}|||{m2})*({tmp_rev_wood} +"
           "{technical_bioenergy}*{price_energy_woodchips})+"
           "{m1t2}*{technical_bioenergy}*{price_energy_woodchips})")

    r.mapcalc(tr1.format(
        total_revenues=("tmprgreen_%i_total_revenues" % pid),
        technical_surface=('tmprgreen_%i_technical_surface' % pid),
        m1t1=m1t1,
        m2=m2,
        m1t2=m1t2,
        tmp_rev_wood=tmp_rev_wood,
        technical_bioenergy=technical_bioenergy,
        price_energy_woodchips=opts['price_energy_woodchips']),
              overwrite=True)
    return ("tmprgreen_%i_total_revenues" % pid)
Example No. 6
def compensation_cost(comp,
                      lan,
                      tri,
                      upper,
                      irate,
                      gamma,
                      life,
                      width,
                      overwrite=False):
    """Compute the compensation raster map costs"""
    expr = ("{comp} = (({lan} + {tri} * "
            "(1 + {irate})^{life}/({irate} * (1 + {irate}))) "
            "* {gamma} + {upper}) * {width} * nsres() / 10000")
    r.mapcalc(
        expr.format(
            comp=comp,
            lan=lan,
            tri=tri,
            upper=upper,
            irate=irate,
            gamma=gamma,
            life=life,
            width=width,
        ),
        overwrite=overwrite,
    )
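A hypothetical call of compensation_cost (map names and parameter values are assumptions about the inputs, not taken from the original module):

# Hypothetical usage sketch; map names and parameter values are assumptions.
compensation_cost(comp='comp_cost',
                  lan='land_value',
                  tri='yearly_damage',
                  upper='upper_value',  # e.g. the output of upper_value() above
                  irate=0.03, gamma=1.2, life=25, width=4.0,
                  overwrite=True)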
Example No. 7
def main():
    """
    Creates a hydrologically correct MODFLOW grid that includes minimum
    DEM elevations for all stream cells and mean elevations everywhere else
    """
    """
    dem = 'DEM'
    grid = 'grid_tmp'
    streams = 'streams_tmp'
    streams_MODFLOW = 'streams_tmp_MODFLOW'
    DEM_MODFLOW = 'DEM_coarse'
    resolution = 500
    """

    options, flags = gscript.parser()
    dem = options['dem']
    grid = options['grid']
    streams = options['streams']
    #resolution = float(options['resolution'])
    streams_MODFLOW = options['streams_modflow']
    DEM_MODFLOW = options['dem_modflow']

    # Get number of rows and columns
    colNames = np.array(gscript.vector_db_select(grid, layer=1)['columns'])
    colValues = np.array(
        gscript.vector_db_select(grid, layer=1)['values'].values())
    cats = colValues[:, colNames == 'cat'].astype(int).squeeze()
    rows = colValues[:, colNames == 'row'].astype(int).squeeze()
    cols = colValues[:, colNames == 'col'].astype(int).squeeze()
    nRows = np.max(rows)
    nCols = np.max(cols)

    gscript.use_temp_region()

    # Set the region to capture only the channel
    g.region(raster=dem)
    v.to_rast(input=streams,
              output=streams_MODFLOW,
              use='val',
              value=1.0,
              type='line',
              overwrite=gscript.overwrite(),
              quiet=True)
    r.mapcalc('tmp' + " = " + streams_MODFLOW + " * " + dem, overwrite=True)
    g.rename(raster=('tmp', streams_MODFLOW), overwrite=True, quiet=True)
    g.region(vector=grid, rows=nRows, cols=nCols, quiet=True)
    r.resamp_stats(input=streams_MODFLOW,
                   output=streams_MODFLOW,
                   method='average',
                   overwrite=gscript.overwrite(),
                   quiet=True)
    r.resamp_stats(input=dem,
                   output=DEM_MODFLOW,
                   method='average',
                   overwrite=gscript.overwrite(),
                   quiet=True)
    r.patch(input=streams_MODFLOW + ',' + DEM_MODFLOW,
            output=DEM_MODFLOW,
            overwrite=True,
            quiet=True)
Example No. 8
def excavation_cost(exc,
                    excmin,
                    excmax,
                    slope,
                    slim,
                    width,
                    depth,
                    overwrite=False):
    """Compute the excavation cost"""
    expr = ("{exc} = if({slope} < {slim}, "
            "({excmin} + ({excmax} - {excmin}) / {slim} * {slope})"
            "* {width} * {depth} * nsres(),"
            "{excmax} * {width} * {depth} * nsres())")
    r.mapcalc(
        expr.format(
            exc=exc,
            slope=slope,
            excmin=excmin,
            excmax=excmax,
            slim=slim,
            width=width,
            depth=depth,
        ),
        overwrite=overwrite,
    )
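Below the slope limit slim the unit cost rises linearly from excmin toward excmax; at or above the limit the maximum rate applies. A hypothetical call with assumed values:

# Hypothetical usage sketch; map names and unit costs are assumptions.
excavation_cost(exc='excavation_cost',
                excmin=50.0, excmax=120.0,
                slope='slope_percent', slim=60.0,
                width=2.0, depth=1.5,
                overwrite=True)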
Example No. 9
def BackUp_mask(image, ClearSmall):
    baseName = image[0].split('.')[0]
    raster_out = baseName + '.B3_masked'
    B3_toar = selectFromImage(image, 'B3_toar')
    NonSnow = selectFromImage(image, 'nonSnow')
    expression = '%(out)s=%(inp1)s*(%(inp2)s&&%(inp3)s)' \
                 % {'out': raster_out, 'inp1': B3_toar, 'inp2': NonSnow, 'inp3': ClearSmall}
    r.mapcalc(expression=expression, overwrite=True)
    image.append(raster_out)
    logging.info('BackUp_mask')
Example No. 10
def getResidual(image, reconstruction, channel):
    basename = image[0].split('.')[0]
    im = selectFromImage(image, channel)
    raster_out = basename + '.' + channel + '_residual'
    expression = '%(out)s=%(im1)s-%(im2)s;' % {
        'out': raster_out,
        'im1': im,
        'im2': reconstruction
    }
    r.mapcalc(expression=expression, overwrite=True)
    image.append(raster_out)
Example No. 11
def net_revenues(opts, technical_bioenergy, tech_bioC, tech_bioHF,
                 total_revenues, total_costs):
    pid = os.getpid()
    # TODO: I will split the outputs
    # each map is an output:
    # mandatory maps: econ_bioenergy, net_revenues
    # optional: econ_bioenergyHF, econ_bioenergyC
    #         : total_revenues, total_cost
    econ_bioenergy = opts['econ_bioenergy']
    econ_bioenergyC = (opts['econ_bioenergyc'] if opts['econ_bioenergyc'] else
                       "tmprgreen_%i_econ_bioenergyc" % pid)
    econ_bioenergyHF = (opts['econ_bioenergyhf'] if opts['econ_bioenergyhf']
                        else "tmprgreen_%i_econ_bioenergyhf" % pid)
    net_revenues = opts['net_revenues']

    # Calculate net revenues and economic biomass
    run_command("r.mapcalc",
                overwrite=True,
                expression='%s = %s - %s' %
                (net_revenues, total_revenues, total_costs))
    positive_net_revenues = "tmprgreen_%i_positive_net_revenues" % pid
    run_command("r.mapcalc",
                overwrite=True,
                expression=('%s = if(%s<=0,0,1)' %
                            (positive_net_revenues, net_revenues)))

    # to avoid scattered pixels with revenues > 0,
    # the map is reclassified, keeping only
    # clustered areas larger than 1 hectare
    economic_surface = "tmprgreen_%i_economic_surface" % pid
    run_command("r.reclass.area",
                overwrite=True,
                input=positive_net_revenues,
                output=economic_surface,
                value=1,
                mode="greater")

    expr = "{econ_bioenergy} = {economic_surface}*{tech_bio}"
    r.mapcalc(expr.format(econ_bioenergy=econ_bioenergyHF,
                          economic_surface=economic_surface,
                          tech_bio=tech_bioHF),
              overwrite=True)
    r.mapcalc(expr.format(econ_bioenergy=econ_bioenergyC,
                          economic_surface=economic_surface,
                          tech_bio=tech_bioC),
              overwrite=True)

    econtot = ("%s = %s + %s" %
               (econ_bioenergy, econ_bioenergyC, econ_bioenergyHF))
    run_command("r.mapcalc", overwrite=True, expression=econtot)
Example No. 12
def BackUpAlgorithm(image, ClearSmall, Mediana, T_MEDIAN_THRESHOLD):
    baseName = image[0].split('.')[0]
    raster_out = baseName + '.BackUpMask'
    B3_toar = selectFromImage(image, 'B3_toar')
    Composite = selectFromImage(image, 'Composite')
    expression = 'eval(Mask=%(B3)s*%(Clearsmall)s, ' \
                 'Threshold=%(Mediana)s+%(Thresh)s); ' \
                 '%(output)s=(Mask>Threshold)*%(Composite)s;' \
                 % {'B3': B3_toar, 'Clearsmall': ClearSmall, 'Mediana': Mediana, 'Thresh': T_MEDIAN_THRESHOLD,
                    'output': raster_out, 'Composite': Composite}
    image.append(raster_out)
    r.mapcalc(expression=expression, overwrite=True)
    logging.info('BackUpAlgorithm')
Example No. 13
def main():
    """
    Creates a hydrologically correct MODFLOW grid that includes minimum
    DEM elevations for all stream cells and mean elevations everywhere else
    """
    """
    dem = 'DEM'
    grid = 'grid_tmp'
    streams = 'streams_tmp'
    streams_MODFLOW = 'streams_tmp_MODFLOW'
    DEM_MODFLOW = 'DEM_coarse'
    resolution = 500
    """

    options, flags = gscript.parser()
    dem = options['dem']
    grid = options['grid']
    streams = options['streams']
    #resolution = float(options['resolution'])
    streams_MODFLOW = options['streams_modflow']
    DEM_MODFLOW = options['dem_modflow']

    gscript.use_temp_region()

    # Set the region to capture only the channel
    g.region(raster=dem)
    v.to_rast(input=streams,
              output=streams_MODFLOW,
              use='val',
              value=1.0,
              type='line',
              overwrite=gscript.overwrite(),
              quiet=True)
    r.mapcalc('tmp' + " = " + streams_MODFLOW + " * " + dem, overwrite=True)
    g.rename(raster=('tmp', streams_MODFLOW), overwrite=True, quiet=True)
    g.region(raster=DEM_MODFLOW, quiet=True)
    print "ALTERED"
    r.resamp_stats(input=streams_MODFLOW,
                   output=streams_MODFLOW,
                   method='average',
                   overwrite=gscript.overwrite(),
                   quiet=True)
    r.resamp_stats(input=dem,
                   output=DEM_MODFLOW,
                   method='average',
                   overwrite=gscript.overwrite(),
                   quiet=True)
    r.patch(input=streams_MODFLOW + ',' + DEM_MODFLOW,
            output=DEM_MODFLOW,
            overwrite=True,
            quiet=True)
Example No. 14
def main():
    """
    Creates a hydrologically correct MODFLOW grid that inlcudes minimum
    DEM elevations for all stream cells and mean elevations everywhere else
    """
    """
    dem = 'DEM'
    grid = 'grid_tmp'
    streams = 'streams_tmp'
    streams_MODFLOW = 'streams_tmp_MODFLOW'
    DEM_MODFLOW = 'DEM_coarse'
    resolution = 500
    """

    options, flags = gscript.parser()
    dem = options['dem']
    grid = options['grid']
    streams = options['streams']
    resolution = float(options['resolution'])
    streams_MODFLOW = options['streams_modflow']
    DEM_MODFLOW = options['dem_modflow']

    gscript.use_temp_region()

    g.region(raster=dem)
    g.region(vector=grid)
    v.to_rast(input=streams,
              output=streams_MODFLOW,
              use='val',
              value=1.0,
              type='line',
              overwrite=gscript.overwrite(),
              quiet=True)
    # write via a temporary map: mapcalc cannot read and write the same map
    r.mapcalc("tmp = " + streams_MODFLOW + " * " + dem,
              overwrite=True)
    g.rename(raster=('tmp', streams_MODFLOW), overwrite=True, quiet=True)
    g.region(res=resolution, quiet=True)
    r.resamp_stats(input=streams_MODFLOW,
                   output=streams_MODFLOW,
                   method='minimum',
                   overwrite=gscript.overwrite(),
                   quiet=True)
    r.resamp_stats(input=dem,
                   output=DEM_MODFLOW,
                   method='average',
                   overwrite=gscript.overwrite(),
                   quiet=True)
    r.patch(input=streams_MODFLOW + ',' + DEM_MODFLOW,
            output=DEM_MODFLOW,
            overwrite=True,
            quiet=True)
Example No. 15
def combination(management, treatment):
    pid = os.getpid()
    # precompute the combinations to avoid repeated if statements
    m1t1 = "tmprgreen_%i_m1t1" % pid
    exp = (
        "{combination}=if(({management}=={c1} && ({treatment}=={c2}"
        "||{treatment}==99999)),1,0)"
    )
    r.mapcalc(
        exp.format(
            combination=m1t1, management=management, c1=1, treatment=treatment, c2=1
        ),
        overwrite=True,
    )
    run_command("r.null", map=m1t1, null=0)
    m1t2 = "tmprgreen_%i_m1t2" % pid
    exp = "{combination}=if(({management}=={c1} && {treatment}=={c2}),1,0)"
    r.mapcalc(
        exp.format(
            combination=m1t2, management=management, c1=1, treatment=treatment, c2=2
        ),
        overwrite=True,
    )
    run_command("r.null", map=m1t2, null=0)
    m2 = "tmprgreen_%i_m2" % pid
    exp = "{combination}=if({management}=={c1},1,0)"
    r.mapcalc(exp.format(combination=m2, management=management, c1=2), overwrite=True)
    run_command("r.null", map=m2, null=0)
    m1 = "tmprgreen_%i_m1" % pid
    exp = "{combination}=if({management}=={c1},1,0)"
    r.mapcalc(exp.format(combination=m1, management=management, c1=1), overwrite=True)
    run_command("r.null", map=m1, null=0)
    not2 = "tmprgreen_%i_not2" % pid
    exp = "{combination}=if(({treatment}=={c1} && {treatment}=={c2}),1,0)"
    r.mapcalc(
        exp.format(combination=not2, c1=1, treatment=treatment, c2=99999),
        overwrite=True,
    )
    run_command("r.null", map=not2, null=0)
    # TODO: try to remove all the r.null calls, since this
    # was already done at the beginning
    return m1t1, m1t2, m1, m2, not2
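A hypothetical use of the function above, unpacking the five binary masks for later map algebra (the input raster names are assumptions):

# Hypothetical usage sketch; the input raster names are assumptions.
m1t1, m1t2, m1, m2, not2 = combination(management='management',
                                       treatment='treatment')
# each returned name is a 0/1 raster usable directly in r.mapcalc expressions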
Example No. 16
def main():
    soillossbare = options['soillossbare']
    cpmax = options['cpmax']
    maxsoilloss = options['maxsoilloss']
    
    r.mapcalc(cpmax + "=" + maxsoilloss + "/" + soillossbare)
    
    cpmaxrules = '\n '.join([
        "0.00  56:145:37",
        "0.01  128:190:91",
        "0.05  210:233:153",
        "0.1  250:203:147",
        "0.15  225:113:76",
        "0.2  186:20:20",
        "100  0:0:0"
    ])

    r.colors(map=cpmax, rules='-', stdin_=cpmaxrules)
    
    gscript.info('Calculation of CPmax-scenario in <%s> finished.' % cpmax)
Example No. 17
def TMaskp_mask(image):
    baseName = image[0].split('.')[0]
    raster_out1 = baseName + '.B3_masked'
    raster_out2 = baseName + '.B5_masked'
    raster_out3 = baseName + '.B6_masked'
    B3_toar = selectFromImage(image, 'B3_toar')
    B5_toar = selectFromImage(image, 'B5_toar')
    B6_toar = selectFromImage(image, 'B6_toar')
    BackUpMask = selectFromImage(image, 'BackUpMask')
    expression = 'eval(Clear=not(%(BackUpMask)s)); ' \
                 '%(out1)s=%(B3)s*Clear;' \
                 '%(out2)s=%(B5)s*Clear;' \
                 '%(out3)s=%(B6)s*Clear' \
                 % {'BackUpMask': BackUpMask, 'out1': raster_out1, 'B3': B3_toar,
                    'out2': raster_out2, 'B5': B5_toar, 'out3': raster_out3, 'B6': B6_toar}
    r.mapcalc(expression=expression, overwrite=True)
    image.append(raster_out1)
    image.append(raster_out2)
    image.append(raster_out3)
    logging.info('TMaskp_mask')
Example No. 18
def float_to_integer(double):
    """Converts an FCELL or DCELL raster map into a CELL raster map

    Parameters
    ----------
    double :
            An 'FCELL' or 'DCELL' type raster map

    Returns
    -------
    This function does not return any value

    Examples
    --------
    ..
    """
    expression = "int({double})"
    expression = expression.format(double=double)
    equation = EQUATION.format(result=double, expression=expression)
    r.mapcalc(equation)
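The module-level EQUATION template is not part of this excerpt; judging from how it is formatted with result and expression, a plausible definition (an assumption) would be:

# Assumed definition of the module-level template used above
# (not shown in this excerpt):
EQUATION = "{result} = {expression}"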
Example No. 19
def FMask(image, radius):
    baseName = image[0].split('.')[0]
    image_BQA = selectFromImage(image, 'BQA')
    raster_FMask = baseName + '.FMask'
    raster_nonSnow = baseName + '.nonSnow'
    raster_composite = baseName + '.Composite'
    expression = 'eval(BQA_int=int(%(BQA)s), ' \
                 'Clouds=((BQA_int & 32)!=0)&&((BQA_int & 64)!=0), ' \
                 'CloudShadows=((BQA_int & 128)!=0)&&((BQA_int & 256)!=0), ' \
                 'Snow=((BQA_int & 512)!=0)&&((BQA_int & 1024)!=0)); ' \
                 '%(out1)s=Clouds || CloudShadows || Snow; ' \
                 '%(out2)s=not(Snow); ' \
                 '%(out3)s=Clouds*3 + Snow*2' \
                %{'BQA':image_BQA, 'out1': raster_FMask, 'out2': raster_nonSnow, 'out3': raster_composite}
    r.mapcalc(expression=expression, overwrite=True)
    #r.grow(input=raster_FMask, output=raster_FMask, radius=radius, overwrite=True)
    image.append(raster_composite)
    image.append(raster_nonSnow)
    logging.info('FMask')
    return raster_FMask
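The mapcalc expression above flags clouds, cloud shadows and snow by testing pairs of bits in the Landsat BQA band. The same bit tests in plain Python, for one assumed pixel value:

# The same BQA bit tests in plain Python, for one assumed pixel value.
bqa = 1632  # assumption: bits 5, 6, 9 and 10 are set
clouds = (bqa & 32) != 0 and (bqa & 64) != 0
cloud_shadows = (bqa & 128) != 0 and (bqa & 256) != 0
snow = (bqa & 512) != 0 and (bqa & 1024) != 0
print(clouds, cloud_shadows, snow)  # True False True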
Example No. 20
def main():
    soillossbare = options['soillossbare']
    soillossgrow = options['soillossgrow']
    cfactor = options['cfactor']
    pfactor = options['pfactor']    
    map = options['map']
    factorcols = options['factorcols'].split(',')
    
    quiet = True
    if gscript.verbosity() > 2:
        quiet=False

    if not (cfactor or pfactor):
        if not map:
            gscript.fatal('Please give either factor raster map(s) or vector map with factor(s)')
        elif not factorcols:
            gscript.fatal("Please give 'factorcols' (attribute columns with factor(s))  for <%s>" %map)
        
        factors = ()
        for factorcol in factorcols:
            output = map.split('@')[0] + '.' + factorcol
            gscript.message('Rasterize <%s> with attribute <%s>' %(map, factorcol) 
                + '\n to raster map <%s> ...' %(output) )
            v.to_rast(input=map, use='attr', attrcolumn=factorcol, 
                      output=output, quiet=quiet)
            factors += (output,)
    
    else:
        factors = (cfactor, pfactor)

    gscript.message('Multiply factors <%s> with <%s> ...' % (factors, soillossbare))
    formula = soillossgrow + '=' + soillossbare
    for factor in factors:
        formula += '*' + factor
    r.mapcalc(formula)
    
    # apply color rules
    r.colors(map=soillossgrow,
             rules='-', stdin_=colorrules['soillossgrow'],
             quiet=quiet)
Example No. 21
def TOAR(images, bands):
    for im in images:
        for band in bands:
            basename = im[0].split('.')[0]
            name = basename + '.' + band
            i = band[1:]
            metapath = os.path.join(METAPATH, basename + '_MTL.txt')
            prop1 = 'REFLECTANCE_MULT_BAND_%s' % (i)
            prop2 = 'REFLECTANCE_ADD_BAND_%s' % (i)
            properties = [prop1, prop2]
            metadata = readMeta(metapath=metapath, properties=properties)
            A = metadata['REFLECTANCE_MULT_BAND_%s' % (i)]
            B = metadata['REFLECTANCE_ADD_BAND_%s' % (i)]
            output = basename + '.' + band + '_toar'
            im.append(output)
            expression = '%(output)s=%(A)s * %(input)s + %(B)s' % {
                'output': output,
                'A': A,
                'input': name,
                'B': B
            }
            r.mapcalc(expression=expression, overwrite=True)
            delete(im, band)
Example No. 22
def classify(image, const1, const2, const3, const4, const5, const6, recon_B3,
             recon_B6):
    baseName = image[0].split('.')[0]
    raster_out = baseName + '.TMask'
    B3_observe = selectFromImage(image, 'B3_masked')
    res_B3 = selectFromImage(image, 'B3_masked_residual')
    res_B5 = selectFromImage(image, 'B5_masked_residual')
    res_B6 = selectFromImage(image, 'B6_masked_residual')
    rec_B3 = selectFromImage(image, recon_B3)
    rec_B6 = selectFromImage(image, recon_B6)
    expression = 'eval(T_snow=(%(const1)s-%(recon_B6)s)*(%(observed_B3)s-%(recon_B3)s)/(%(const2)s-%(recon_B3)s), ' \
                 'Step1=(%(resB3)s)>(%(const3)s), ' \
                 'Step2=((%(resB5)s)>(%(const4)s))&&((%(resB6)s)<T_snow), ' \
                 'Step3=((%(resB5)s)<(%(const5)s))&&((%(resB6)s)<(%(const6)s)), ' \
                 'Snow=Step1&&Step2, ' \
                 'Cloud=Step1&&(not(Step2)), ' \
                 'Cloud_shadow=(not(Step1))&&Step3); ' \
                 '%(out)s=Cloud*3 + Snow*2 + Cloud_shadow;' \
                 % {'const1': const1, 'recon_B6': rec_B6, 'observed_B3': B3_observe, 'recon_B3': rec_B3,
                    'const2': const2, 'resB3': res_B3, 'const3': const3, 'resB5': res_B5, 'const4': const4,
                    'resB6': res_B6, 'const5': const5, 'const6': const6, 'out': raster_out}
    r.mapcalc(expression=expression, overwrite=True)
    image.append(raster_out)
    logging.info('classify')
Example No. 23
def lee_filter(img, size, img_out):
    
    pid = str(os.getpid())
    img_mean     = 'tmp%s_img_mean'     % pid
    img_sqr      = 'tmp%s_img_sqr'      % pid
    img_sqr_mean = 'tmp%s_img_sqr_mean' % pid
    img_variance = 'tmp%s_img_variance' % pid
    img_weights  = 'tmp%s_img_weights'  % pid

    # Local mean
    r.neighbors(input = img, 
                size = size, 
                method = 'average',
                output = img_mean)
    # Local square mean         
    r.mapcalc("%s = %s^2" % (img_sqr, img))
    r.neighbors(input = img_sqr, 
                size = size, 
                method = 'average',
                output = img_sqr_mean)
    # Local variance
    r.mapcalc("%s = %s - (%s^2)" % (img_variance, 
                img_sqr_mean, img_mean))
    # Overall variance
    return_univar = grass.read_command('r.univar', 
                map = img, flags = 'ge')
    univar_stats = grass.parse_key_val(return_univar)
    overall_variance = univar_stats['variance']
    # Weights
    r.mapcalc("%s = %s / (%s + %s)" % (img_weights, img_variance, 
                img_variance, overall_variance))
    # Output
    r.mapcalc("%s = %s + %s * (%s - %s)" % (img_out, img_mean, 
                img_weights, img, img_mean))

    # Cleanup
    grass.message(_("Cleaning up intermediate files..."))
    try:
        grass.run_command('g.remove', flags = 'f', quiet = False, 
                type = 'raster', pattern = 'tmp*')
    except:
        pass

    return img_out
Example No. 24
def lee_filter(img, size, img_out):

    pid = str(os.getpid())
    img_mean = "tmp%s_img_mean" % pid
    img_sqr = "tmp%s_img_sqr" % pid
    img_sqr_mean = "tmp%s_img_sqr_mean" % pid
    img_variance = "tmp%s_img_variance" % pid
    img_weights = "tmp%s_img_weights" % pid

    # Local mean
    r.neighbors(input=img, size=size, method="average", output=img_mean)
    # Local square mean
    r.mapcalc("%s = %s^2" % (img_sqr, img))
    r.neighbors(input=img_sqr,
                size=size,
                method="average",
                output=img_sqr_mean)
    # Local variance
    r.mapcalc("%s = %s - (%s^2)" % (img_variance, img_sqr_mean, img_mean))
    # Overall variance
    return_univar = grass.read_command("r.univar", map=img, flags="ge")
    univar_stats = grass.parse_key_val(return_univar)
    overall_variance = univar_stats["variance"]
    # Weights
    r.mapcalc("%s = %s / (%s + %s)" %
              (img_weights, img_variance, img_variance, overall_variance))
    # Output
    r.mapcalc("%s = %s + %s * (%s - %s)" %
              (img_out, img_mean, img_weights, img, img_mean))

    # Cleanup
    grass.message(_("Cleaning up intermediate files..."))
    try:
        grass.run_command("g.remove",
                          flags="f",
                          quiet=False,
                          type="raster",
                          pattern="tmp*")
    except:
        pass

    return img_out
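Both versions implement the additive Lee filter: each output pixel is the local mean plus a weight times the deviation from it, with weight = local variance / (local variance + overall variance), so homogeneous regions (low local variance) are smoothed while textured regions are preserved. A hypothetical call, assuming an existing backscatter raster:

# Hypothetical usage sketch; the raster names are assumptions.
lee_filter(img='sar_backscatter', size=5, img_out='sar_backscatter_lee')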
Example No. 25
def yield_pix_process(opts, vector_forest, yield_, yield_surface, rivers,
                      lakes, forest_roads, m1, m2, m1t1, m1t2, roughness):
    pid = os.getpid()
    tmp_slope = 'tmprgreen_%i_slope' % pid
    tmp_slope_deg = 'tmprgreen_%i_slope_deg' % pid
    technical_surface = "tmprgreen_%i_technical_surface" % pid
    cable_crane_extraction = "tmprgreen_%i_cable_crane_extraction" % pid
    forwarder_extraction = "tmprgreen_%i_forwarder_extraction" % pid
    other_extraction = "tmprgreen_%i_other_extraction" % pid

    run_command("r.param.scale",
                overwrite=True,
                input=opts['elevation'],
                output="morphometric_features",
                size=3,
                method="feature")
    # peaks get a higher cost/distance so that extraction routes do not leave the valley

    expr = "{pix_cross} = ((ewres()+nsres())/2)/ cos({tmp_slope_deg})"
    r.mapcalc(expr.format(pix_cross=('tmprgreen_%i_pix_cross' % pid),
                          tmp_slope_deg=tmp_slope_deg),
              overwrite=True)
    # FIXME: yield_surface is the planimetric surface, not the real surface
    # of the forest unit; should the real one be computed?
    # if yield_pix1 == 0 then yield is 0, so either yield or yield_pix can
    # be used, but it is computed only once in the code
    run_command("r.mapcalc",
                overwrite=True,
                expression=('yield_pix1 = (' + yield_ + '/' + yield_surface +
                            ')*((ewres()*nsres())/10000)'))

    run_command("r.null", map="yield_pix1", null=0)
    run_command("r.null", map="morphometric_features", null=0)

    # FIXME: initial control on the yield in order to verify if it is positive
    #    exprmap = ("{frict_surf_extr} = {pix_cross} + if(yield_pix1<=0, 99999)"
    #               "+ if({morphometric_features}==6, 99999)")

    exprmap = ("{frict_surf_extr} = {pix_cross}"
               "+ if({morphometric_features}==6, 99999)")
    if rivers:
        run_command("v.to.rast",
                    input=rivers,
                    output=('tmprgreen_%i_rivers' % pid),
                    use="val",
                    value=99999,
                    overwrite=True)
        run_command("r.null", map=rivers, null=0)
        exprmap += "+ %s" % ('tmprgreen_%i_rivers' % pid)

    if lakes:
        run_command("v.to.rast",
                    input=lakes,
                    output=('tmprgreen_%i_lakes' % pid),
                    use="val",
                    value=99999,
                    overwrite=True)
        run_command("r.null", map=lakes, null=0)
        exprmap += '+ %s' % ('tmprgreen_%i_lakes' % pid)

    frict_surf_extr = 'tmprgreen_%i_frict_surf_extr' % pid
    extr_dist = 'tmprgreen_%i_extr_dist' % pid
    r.mapcalc(exprmap.format(
        frict_surf_extr=frict_surf_extr,
        pix_cross=('tmprgreen_%i_pix_cross' % pid),
        morphometric_features='morphometric_features',
    ),
              overwrite=True)

    run_command("r.cost",
                overwrite=True,
                input=frict_surf_extr,
                output=extr_dist,
                stop_points=vector_forest,
                start_rast='tmprgreen_%i_forest_roads' % pid,
                max_cost=1500)
    slp_min_cc = opts['slp_min_cc']
    slp_max_cc = opts['slp_max_cc']
    dist_max_cc = opts['dist_max_cc']
    ccextr = ("{cable_crane_extraction} = if({yield_} >0 && {tmp_slope}"
              "> {slp_min_cc} && {tmp_slope} <= {slp_max_cc} && {extr_dist}<"
              "{dist_max_cc} , 1)")
    r.mapcalc(ccextr.format(cable_crane_extraction=cable_crane_extraction,
                            yield_=yield_,
                            tmp_slope=tmp_slope,
                            slp_min_cc=slp_min_cc,
                            slp_max_cc=slp_max_cc,
                            dist_max_cc=dist_max_cc,
                            extr_dist=extr_dist),
              overwrite=True)

    fwextr = ("{forwarder_extraction} = if({yield_}>0 && {tmp_slope}<="
              "{slp_max_fw} && ({roughness} ==0 ||"
              "{roughness}==1 || {roughness}==99999) &&"
              "{extr_dist}<{dist_max_fw}, {m1}*1)")

    r.mapcalc(fwextr.format(forwarder_extraction=forwarder_extraction,
                            yield_=yield_,
                            tmp_slope=tmp_slope,
                            slp_max_fw=opts['slp_max_fw'],
                            m1=m1,
                            roughness=roughness,
                            dist_max_fw=opts['dist_max_fw'],
                            extr_dist=extr_dist),
              overwrite=True)

    oextr = ("{other_extraction} = if({yield_}>0 &&"
             "{tmp_slope}<={slp_max_cop} &&"
             "({roughness}==0 || {roughness}==1 ||"
             "{roughness}==99999) && {extr_dist}< {dist_max_cop}, {m2}*1)")

    r.mapcalc(oextr.format(other_extraction=other_extraction,
                           yield_=yield_,
                           tmp_slope=tmp_slope,
                           slp_max_cop=opts['slp_max_cop'],
                           m2=m2,
                           roughness=roughness,
                           dist_max_cop=opts['dist_max_cop'],
                           extr_dist=extr_dist),
              overwrite=True)

    run_command("r.null", map=cable_crane_extraction, null=0)
    run_command("r.null", map=forwarder_extraction, null=0)
    run_command("r.null", map=other_extraction, null=0)
    # FIXME: or instead of plus
    expression = ("{technical_surface} = {cable_crane_extraction} +"
                  "{forwarder_extraction} + {other_extraction}")
    r.mapcalc(expression.format(technical_surface=technical_surface,
                                cable_crane_extraction=cable_crane_extraction,
                                forwarder_extraction=forwarder_extraction,
                                other_extraction=other_extraction),
              overwrite=True)

    run_command("r.null", map=technical_surface, null=0)
    # FIXME: in my opinion we cannot sum two different energy coefficients
    # is the energy_vol_hf including the energy_tops?
    ehf = ("{tech_bioHF} = {technical_surface}*{yield_pix}*"
           "({m1t1}*{ton_tops_hf}+"
           "{m1t2}*({ton_vol_hf}+{ton_tops_hf}))")
    tech_bioHF = ('tmprgreen_%i_tech_bioenergyHF' % pid)
    r.mapcalc(ehf.format(tech_bioHF=tech_bioHF,
                         technical_surface=technical_surface,
                         m1t1=m1t1,
                         m1t2=m1t2,
                         yield_pix='yield_pix1',
                         ton_tops_hf=opts['ton_tops_hf'],
                         ton_vol_hf=opts['ton_vol_hf']),
              overwrite=True)
    tech_bioC = 'tmprgreen_%i_tech_bioenergyC' % pid
    ecc = ("{tech_bioC} = {technical_surface}*{m2}*{yield_pix}"
           "*{ton_tops_cop}")
    r.mapcalc(ecc.format(tech_bioC=tech_bioC,
                         technical_surface=technical_surface,
                         m2=m2,
                         yield_pix='yield_pix1',
                         ton_tops_cop=opts['ton_tops_cop']),
              overwrite=True)
    technical_bioenergy = "tmprgreen_%i_techbio" % pid
    exp = "{technical_bioenergy}={tech_bioHF}+{tech_bioC}"
    r.mapcalc(exp.format(technical_bioenergy=technical_bioenergy,
                         tech_bioC=tech_bioC,
                         tech_bioHF=tech_bioHF),
              overwrite=True)

    run_command("r.null", map=technical_bioenergy, null=0)
    #FIXME: use something more efficient
    with RasterRow(technical_bioenergy) as pT:
        T = np.array(pT)
    print(("Tech bioenergy stimated (ton): %.2f" % np.nansum(T)))
    return technical_bioenergy, tech_bioC, tech_bioHF
Example No. 26
def main():
    """
    Adds GSFLOW parameters to a set of HRU sub-basins
    """

    ##################
    # OPTION PARSING #
    ##################

    options, flags = gscript.parser()
    basins = options['input']
    HRU = options['output']
    slope = options['slope']
    aspect = options['aspect']
    elevation = options['elevation']
    land_cover = options['cov_type']
    soil = options['soil_type']

    ################################
    # CREATE HRUs FROM SUB-BASINS  #
    ################################

    g.copy(vector=(basins,HRU), overwrite=gscript.overwrite())

    ############################################
    # ATTRIBUTE COLUMNS (IN ORDER FROM MANUAL) #
    ############################################

    # HRU
    hru_columns = []
    # Self ID
    hru_columns.append('id integer') # nhru
    # Basic Physical Attributes (Geometry)
    hru_columns.append('hru_area double precision') # acres (!!!!)
    hru_columns.append('hru_area_m2 double precision') # [not for GSFLOW: for me!]
    hru_columns.append('hru_aspect double precision') # Mean aspect [degrees]
    hru_columns.append('hru_elev double precision') # Mean elevation
    hru_columns.append('hru_lat double precision') # Latitude of centroid
    hru_columns.append('hru_lon double precision') # Longitude of centroid
                                                   # unnecessary but why not?
    hru_columns.append('hru_slope double precision') # Mean slope [percent]
    # Basic Physical Attributes (Other)
    #hru_columns.append('hru_type integer') # 0=inactive; 1=land; 2=lake; 3=swale; almost all will be 1
    #hru_columns.append('elev_units integer') # 0=feet; 1=meters. 0=default. I think I will set this to 1 by default.
    # Measured input
    hru_columns.append('outlet_sta integer') # Index of streamflow station at basin outlet:
                                             # station number if it has one, 0 if not
    # Note that the below specify projected coordinates, not lat/lon; they really
    # seem to work for any projected coordinates, with _x, _y in meters and
    # _xlong, _ylat in feet (i.e. they are just northing and easting). The meters
    # and feet are not just simple conversions, but actually are required for
    # different modules in the code, and are hence redundant but intentional.
    hru_columns.append('hru_x double precision') # Easting [m]
    hru_columns.append('hru_xlong double precision') # Easting [feet]
    hru_columns.append('hru_y double precision') # Northing [m]
    hru_columns.append('hru_ylat double precision') # Northing [feet]
    # Streamflow and lake routing
    hru_columns.append('K_coef double precision') # Travel time of flood wave to next downstream segment;
                                                  # this is the Muskingum storage coefficient
                                                  # 1.0 for reservoirs, diversions, and segments flowing
                                                  # out of the basin
    hru_columns.append('x_coef double precision') # Amount of attenuation of flow wave;
                                                  # this is the Muskingum routing weighting factor
                                                  # range: 0.0--0.5; default 0.2
                                                  # 0 for all segments flowing out of the basin
    hru_columns.append('hru_segment integer') # ID of stream segment to which flow will be routed
                                              # this is for non-cascade routing (flow goes directly
                                              # from HRU to stream segment)
    hru_columns.append('obsin_segment integer') # Index of measured streamflow station that replaces
                                                # inflow to a segment
    hru_columns.append('cov_type integer') # 0=bare soil;1=grasses; 2=shrubs; 3=trees; 4=coniferous
    hru_columns.append('soil_type integer') # 1=sand; 2=loam; 3=clay

    # Create strings
    hru_columns = ",".join(hru_columns)

    # Add columns to tables
    v.db_addcolumn(map=HRU, columns=hru_columns, quiet=True)


    ###########################
    # UPDATE DATABASE ENTRIES #
    ###########################

    colNames = np.array(gscript.vector_db_select(HRU, layer=1)['columns'])
    colValues = np.array(gscript.vector_db_select(HRU, layer=1)['values'].values())
    number_of_hrus = colValues.shape[0]
    cats = colValues[:,colNames == 'cat'].astype(int).squeeze()
    rnums = colValues[:,colNames == 'rnum'].astype(int).squeeze()

    nhru = np.arange(1, number_of_hrus + 1)
    nhrut = []
    for i in range(len(nhru)):
        nhrut.append((nhru[i], cats[i]))
    # Access the HRUs 
    hru = VectorTopo(HRU)
    # Open the map with topology:
    hru.open('rw')
    # Create a cursor
    cur = hru.table.conn.cursor()
    # Use it to loop across the table
    cur.executemany("update "+HRU+" set id=? where cat=?", nhrut)
    # Commit changes to the table
    hru.table.conn.commit()
    # Close the table
    hru.close()

    """
    # Do the same for basins <-------------- DO THIS OR SIMPLY HAVE HRUs OVERLAIN WITH GRID CELLS? IN THIS CASE, RMV AREA ADDITION TO GRAVRES
    v.db_addcolumn(map=basins, columns='id int', quiet=True)
    basins = VectorTopo(basins)
    basins.open('rw')
    cur = basins.table.conn.cursor()
    cur.executemany("update basins set id=? where cat=?", nhrut)
    basins.table.conn.commit()
    basins.close()
    """

    # if you want to append to table
    # cur.executemany("update HRU(id) values(?)", nhrut) # "insert into" will add rows

    #hru_columns.append('hru_area double precision')
    # Acres b/c USGS
    v.to_db(map=HRU, option='area', columns='hru_area', units='acres', quiet=True)
    v.to_db(map=HRU, option='area', columns='hru_area_m2', units='meters', quiet=True)

    # GET MEAN VALUES FOR THESE NEXT ONES, ACROSS THE BASIN

    # SLOPE (and aspect) 
    #####################
    v.rast_stats(map=HRU, raster=slope, method='average', column_prefix='tmp', flags='c', quiet=True)
    v.db_update(map=HRU, column='hru_slope', query_column='tmp_average', quiet=True)

    # ASPECT
    #########
    v.db_dropcolumn(map=HRU, columns='tmp_average', quiet=True)
    # Dealing with conversion from degrees (no good average) to something I can
    # average -- x- and y-vectors
    # Geographic coordinates, so sin=x, cos=y.... not that it matters so long 
    # as I am consistent in how I return to degrees
    r.mapcalc('aspect_x = sin(' + aspect + ')', overwrite=gscript.overwrite(), quiet=True)
    r.mapcalc('aspect_y = cos(' + aspect + ')', overwrite=gscript.overwrite(), quiet=True)
    #grass.run_command('v.db.addcolumn', map=HRU, columns='aspect_x_sum double precision, aspect_y_sum double precision, ncells_in_hru integer')
    v.rast_stats(map=HRU, raster='aspect_x', method='sum', column_prefix='aspect_x', flags='c', quiet=True)
    v.rast_stats(map=HRU, raster='aspect_y', method='sum', column_prefix='aspect_y', flags='c', quiet=True)
    hru = VectorTopo(HRU)
    hru.open('rw')
    cur = hru.table.conn.cursor()
    cur.execute("SELECT cat,aspect_x_sum,aspect_y_sum FROM %s" %hru.name)
    _arr = np.array(cur.fetchall()).astype(float)
    _cat = _arr[:,0]
    _aspect_x_sum = _arr[:,1]
    _aspect_y_sum = _arr[:,2]
    aspect_angle = np.arctan2(_aspect_y_sum, _aspect_x_sum) * 180. / np.pi
    aspect_angle[aspect_angle < 0] += 360 # all positive
    aspect_angle_cat = np.vstack((aspect_angle, _cat)).transpose()
    cur.executemany("update "+ HRU +" set hru_aspect=? where cat=?", aspect_angle_cat)
    hru.table.conn.commit()
    hru.close()

    # ELEVATION
    ############
    v.rast_stats(map=HRU, raster=elevation, method='average', column_prefix='tmp', flags='c', quiet=True)
    v.db_update(map=HRU, column='hru_elev', query_column='tmp_average', quiet=True)
    v.db_dropcolumn(map=HRU, columns='tmp_average', quiet=True)

    # CENTROIDS 
    ############

    # get x,y of centroid -- but have areas not in database table, that do have
    # centroids, and having a hard time finding a good way to get rid of them!
    # They have duplicate category values!
    # Perhaps these are little dangles on the edges of the vectorization where
    # the raster value was the same but pinched out into 1-a few cells?
    # From looking at map, lots of extra centroids on area boundaries, and removing
    # small areas (though threshold hard to guess) gets rid of these

    hru = VectorTopo(HRU)
    hru.open('rw')
    hru_cats = []
    hru_coords = []
    for hru_i in hru:
        if type(hru_i) is vector.geometry.Centroid:
            hru_cats.append(hru_i.cat)
            hru_coords.append(hru_i.coords())
    hru_cats = np.array(hru_cats)
    hru_coords = np.array(hru_coords)
    hru.rewind()
    
    hru_area_ids = []
    for coor in hru_coords:
        _area = hru.find_by_point.area(Point(coor[0], coor[1]))
        hru_area_ids.append(_area)
    hru_area_ids = np.array(hru_area_ids)
    hru.rewind()

    hru_areas = []
    for _area_id in hru_area_ids:
        hru_areas.append(_area_id.area())
    hru_areas = np.array(hru_areas)
    hru.rewind()
      
    allcats = sorted(list(set(list(hru_cats))))
    
    # Now create weighted mean
    hru_centroid_locations = []
    for cat in allcats:
        hrus_with_cat = hru_cats[hru_cats == cat]
        if len(hrus_with_cat) == 1:
            hru_centroid_locations.append((hru_coords[hru_cats == cat]).squeeze())
        else:
            _centroids = hru_coords[hru_cats == cat]
            #print _centroids
            _areas = hru_areas[hru_cats == cat]
            #print _areas
            _x = np.average(_centroids[:,0], weights=_areas)
            _y = np.average(_centroids[:,1], weights=_areas)
            #print _x, _y
            hru_centroid_locations.append(np.array([_x, _y]))
          
    # Now upload weighted mean to database table
    # allcats and hru_centroid_locations are co-indexed
    index__cats = create_iterator(HRU)
    cur = hru.table.conn.cursor()
    for i in range(len(allcats)):
        # meters
        cur.execute('update '+HRU
                    +' set hru_x='+str(hru_centroid_locations[i][0])
                    +' where cat='+str(allcats[i]))
        cur.execute('update '+HRU
                    +' set hru_y='+str(hru_centroid_locations[i][1])
                    +' where cat='+str(allcats[i]))
        # feet
        cur.execute('update '+HRU
                    +' set hru_xlong='+str(hru_centroid_locations[i][0]*3.28084)
                    +' where cat='+str(allcats[i]))
        cur.execute('update '+HRU
                    +' set hru_ylat='+str(hru_centroid_locations[i][1]*3.28084)
                    +' where cat='+str(allcats[i]))
        # (un)Project to lat/lon
        _centroid_ll = gscript.parse_command('m.proj',
                                             coordinates=
                                             list(hru_centroid_locations[i]),
                                             flags='od').keys()[0]
        _lon, _lat, _z = _centroid_ll.split('|')
        cur.execute('update '+HRU
                    +' set hru_lon='+_lon
                    +' where cat='+str(allcats[i]))
        cur.execute('update '+HRU
                    +' set hru_lat='+_lat
                    +' where cat='+str(allcats[i]))

    # feet -- not working.
    # Probably an issue with index__cats -- maybe fix later, if needed
    # But currently not a major speed issue
    """
    cur.executemany("update "+HRU+" set hru_xlong=?*3.28084 where hru_x=?", 
                    index__cats)
    cur.executemany("update "+HRU+" set hru_ylat=?*3.28084 where hru_y=?", 
                    index__cats)
    """                    

    cur.close()
    hru.table.conn.commit()
    hru.close()

    # ID NUMBER
    ############
    #cur.executemany("update "+HRU+" set hru_segment=? where id=?", 
    #                index__cats)
    # Segment number = HRU ID number
    v.db_update(map=HRU, column='hru_segment', query_column='id', quiet=True)

    # LAND USE/COVER
    ############
    try:
        land_cover = int(land_cover)
    except:
        pass
    if type(land_cover) is int:
        if land_cover <= 3:
            v.db_update(map=HRU, column='cov_type', value=land_cover, quiet=True)
        else:
            sys.exit("WARNING: INVALID LAND COVER TYPE. CHECK INTEGER VALUES.\n"
                     "EXITING TO ALLOW USER TO CHANGE BEFORE RUNNING GSFLOW")
    else:
        # NEED TO UPDATE THIS TO MODAL VALUE!!!!
        gscript.message("Warning: values taken from HRU centroids. Code should be updated to")
        gscript.message("acquire modal values")
        v.what_rast(map=HRU, type='centroid', raster=land_cover, column='cov_type', quiet=True)
        #v.rast_stats(map=HRU, raster=land_cover, method='average', column_prefix='tmp', flags='c', quiet=True)
        #v.db_update(map=HRU, column='cov_type', query_column='tmp_average', quiet=True)
        #v.db_dropcolumn(map=HRU, columns='tmp_average', quiet=True)

    # SOIL
    ############
    try:
        soil = int(soil)
    except:
        pass
    if type(soil) is int:
        if (soil > 0) and (soil <= 3):
            v.db_update(map=HRU, column='soil_type', value=soil, quiet=True)
        else:
            sys.exit("WARNING: INVALID SOIL TYPE. CHECK INTEGER VALUES.\n"
                     "EXITING TO ALLOW USER TO CHANGE BEFORE RUNNING GSFLOW")
    else:
        # NEED TO UPDATE THIS TO MODAL VALUE!!!!
        gscript.message("Warning: values taken from HRU centroids. Code should be updated to")
        gscript.message("acquire modal values")
        v.what_rast(map=HRU, type='centroid', raster=soil, column='soil_type', quiet=True)
Example No. 27
if Settings.DEM_input != '':
    # Import DEM and set region
    r.in_gdal(input=Settings.DEM_input,
              output=DEM_original_import,
              overwrite=True)
    g.region(raster=DEM_original_import)
    # Build flow accumulation with only fully on-map flow
    # Cell areas
    r.cell_area(output=cellArea_meters2, units='m2', overwrite=True)
    # Flow weights (e.g., precipitation)
    # Test first if it is an existing raster; if not, import
    rastersAll = np.array(
        list(set(list(gscript.parse_command('g.list', type='raster')))))
    if Settings.flow_weights in rastersAll:
        # NOTE: Here, this might not necessarily be called "flow_weights"
        r.mapcalc(flow + ' = ' + cellArea_meters2 + ' * ' + Settings.flow_weights,
                  overwrite=True)
    else:
        r.in_gdal(input=Settings.flow_weights,
                  output=flow_weights,
                  overwrite=True)
        r.mapcalc(flow + ' = ' + cellArea_meters2 + ' * ' + flow_weights,
                  overwrite=True)
    # Hydrologic correction
    r.hydrodem(input=DEM_original_import,
               output=DEM,
               flags='a',
               overwrite=True)
    # No offmap flow
    r.watershed(elevation=DEM,
                flow=flow,
                accumulation=accumulation)
Example No. 28
from mower import GrassSession

DEM = "/home/mperry/projects/shortcreek/dem/dem.img"

with GrassSession(DEM) as gs:
    from grass.pygrass.modules.shortcuts import raster

    # Import/Link to External GDAL data
    raster.external(input=DEM, output="dem")

    # Perform calculations
    raster.mapcalc(expression="demft=dem*3.28084")
    raster.slope_aspect(elevation="demft", slope="slope", aspect="aspect")

    # Export from GRASS to GDAL
    from grass.pygrass.gis import Mapset
    m = Mapset()
    for r in m.glist('rast'):
        if r == "dem":
            # don't save the original
            continue

        raster.out_gdal(r, format="GTiff", output="/tmp/{}.tif".format(r), overwrite=True)
Example No. 29
def TMaskAlgorithm(images,
                   BACKUP_ALG_THRESHOLD=15,
                   RADIUS_BUFF=3,
                   T_MEDIAN_THRESHOLD=0.04,
                   BLUE_CHANNEL_PURE_SNOW_THRESHOLD=0.4,
                   NIR_CHANNEL_PURE_SNOW_THRESHOLD=0.12,
                   BLUE_CHANNEL_THRESHOLD=0.04,
                   NIR_CHANNEL_CLOUD_SNOW_THRESHOLD=0.04,
                   NIR_CHANNEL_SHADOW_CLEAR_THRESHOLD=-0.04,
                   SWIR1_CHANNEL_SHADOW_CLEAR_THRESHOLD=-0.04):

    # sorting by date:
    images.sort(key=lambda im: getDate(im[0]), reverse=True)

    # the size of the collection:
    ImageCounts = len(images)
    text1 = 'Total number of images: %s' % (ImageCounts)
    text2 = 'Warning: You have less than %s images!' % (BACKUP_ALG_THRESHOLD)
    if ImageCounts >= BACKUP_ALG_THRESHOLD:
        logging.info(text1)
    else:
        logging.info(text2)

    # FMask, composite and non-snow masks:
    FMask_collection = [FMask(im, RADIUS_BUFF) for im in images]
    for im in images:
        delete(im, 'BQA')

    # reducing FMask collection:
    r.series(input=FMask_collection,
             output='ConditionMap.const',
             method='sum',
             overwrite=True)
    ConditionMap = 'ConditionMap.const'
    for im in FMask_collection:
        g.remove(type='raster', name=im, flags='fb')

    # detect which part of data should be used for BackUp algorithm:
    expression = 'ClearSmall.const=%(im)s>(%(all)s-%(thresh)s)' % {
        'im': ConditionMap,
        'all': ImageCounts,
        'thresh': BACKUP_ALG_THRESHOLD
    }
    r.mapcalc(expression=expression, overwrite=True)
    ClearSmall = 'ClearSmall.const'
    g.remove(type='raster', name=ConditionMap, flags='fb')

    # forming non-snow pixels:
    for im in images:
        BackUp_mask(im, ClearSmall)
        delete(im, 'nonSnow')

    # calculate the median of potential clear pixels for the BackUp approach:
    r.series(input=selectFromCollection(images, 'B3_masked'),
             output='Mediana.const',
             method='median',
             overwrite=True)
    Mediana = 'Mediana.const'
    for im in images:
        delete(im, 'B3_masked')

    # BackUp algorithm:
    for im in images:
        BackUpAlgorithm(im, ClearSmall, Mediana, T_MEDIAN_THRESHOLD)
        delete(im, 'Composite')
    g.remove(type='raster', name=Mediana, flags='fb')

    # create mask for TMask algorithm:
    for im in images:
        TMaskp_mask(im)
        #delete(im, 'B3_toar')
        #delete(im, 'B5_toar')
        #delete(im, 'B6_toar')
    #g.remove(type='raster', name=ClearSmall, flags='fb')

    # regression for blue, NIR, SWIR channel:
    RobustRegression(images, 'B3_masked', fet=0.5, dod=2, order=1, iterates=2)
    RobustRegression(images, 'B5_masked', fet=0.5, dod=2, order=1, iterates=2)
    RobustRegression(images, 'B6_masked', fet=0.5, dod=2, order=1, iterates=2)

    # getting residuals:
    for im in images:
        getResidual(im, selectFromImage(im, 'B3_masked_lwr_lwr'), 'B3_masked')
        getResidual(im, selectFromImage(im, 'B5_masked_lwr_lwr'), 'B5_masked')
        getResidual(im, selectFromImage(im, 'B6_masked_lwr_lwr'), 'B6_masked')
        delete(im, 'B5_masked')
        delete(im, 'B6_masked')
        #delete(im, 'B5_masked_lwr_lwr')

    # classification:
    const1 = NIR_CHANNEL_PURE_SNOW_THRESHOLD
    const2 = BLUE_CHANNEL_PURE_SNOW_THRESHOLD
    const3 = BLUE_CHANNEL_THRESHOLD
    const4 = NIR_CHANNEL_CLOUD_SNOW_THRESHOLD
    const5 = NIR_CHANNEL_SHADOW_CLEAR_THRESHOLD
    const6 = SWIR1_CHANNEL_SHADOW_CLEAR_THRESHOLD
    for im in images:
        classify(im, const1, const2, const3, const4, const5, const6,
                 'B3_masked_lwr_lwr', 'B6_masked_lwr_lwr')
        delete(im, 'B3_masked_lwr_lwr')
        delete(im, 'B6_masked_lwr_lwr')
        delete(im, 'B3_masked')
        delete(im, 'B3_masked_residual')
        delete(im, 'B5_masked_residual')
        delete(im, 'B6_masked_residual')

    for im in images:
        basename = im[0].split('.')[0]
        out = basename + '.Mask'
        expression = '%(out)s=(%(mask1)s) + (%(mask2)s*not(%(mask1)s))' \
                     %{'out':out, 'mask1': selectFromImage(im,'BackUpMask'), 'mask2': selectFromImage(im,'TMask')}
        r.mapcalc(expression=expression, overwrite=True)
        delete(im, 'BackUpMask')
        delete(im, 'TMask')
        im.append(out)

    return images
Example No. 30
def main():
    soillossin = options['soillossin']
    soillossout = options['soillossout']
    factorold = options['factorold']
    
    factornew = options['factornew']
    map = options['map']
    factorcol = options['factorcol']
    
    flag_p = flags['p'] # patch factornew with factorold
    flag_k = flags['k'] # calculate k-factor components from % clay p_T, silt p_U, stones p_st, humus p_H 

     
    if not factornew:
        factors = {}
        if flag_k:
            gscript.message('Using factor derived from soil components.')
            parcelmap = Vect(map)
            parcelmap.open(mode='rw', layer=1)
            parcelmap.table.filters.select()
            cur = parcelmap.table.execute()
            col_names = [cn[0] for cn in cur.description]
            rows = cur.fetchall()
           
            for col in (u'Kb',u'Ks',u'Kh', u'K'):
                if col not in parcelmap.table.columns:
                    parcelmap.table.columns.add(col,u'DOUBLE')
           
            for row in rows:
                rowid = row[1]
                p_T = row[7]
                p_U = row[8]
                p_st = row[9]
                p_H = row[10]
    
                print("Parzelle mit id %d :" %rowid)
                for sublist in bodenarten:
                    # p_T and p_U
                    if p_T in range(sublist[2],sublist[3]) \
                        and p_U in range(sublist[4],sublist[5]) :
                        print('Soil type "' + sublist[1]
                            + '", Kb = ' + str(sublist[6]))
                        Kb = sublist[6]
                        break
                
                for sublist in skelettgehalte:
                    if p_st < sublist[0]:
                        print('Stone content class up to ' + str(sublist[0])
                            + ', Ks = ' + str(sublist[1]))
                        Ks = sublist[1]
                        break
            
                   
                for sublist in humusgehalte:
                    if p_H < sublist[0]:
                        print('Humus content class up to ' + str(sublist[0])
                            + ', Kh = ' + str(sublist[1]))
                        Kh = sublist[1]
                        break
                
                
                K = Kb * Ks * Kh
                print('K = ' + str(K))
        
                if K > 0:
                    parcelmap.table.execute("UPDATE " +  parcelmap.name 
                        + " SET"
                        + " Kb=" + str(Kb)
                        + ", Ks=" + str(Ks)
                        + ", Kh=" + str(Kh)
                        + ", K=" + str(K)
                        + " WHERE id=" + str(rowid) )
                    parcelmap.table.conn.commit()
                
            parcelmap.close()
            factorcol2 = 'K'
            
            factors['k'] = map.split('@')[0]+'.tmp.'+factorcol2
            v.to_rast(input=map, use='attr',
                   attrcolumn=factorcol2,
                   output=factors['k'])
            r.null(map=factors['k'], setnull='0')

        
        if factorcol:
            gscript.message('Using factor from column %s of '
                            'vector map <%s>.' % (factorcol, map))
                    
            factors['factorcol'] = map.split('@')[0]+'.tmp.' + factorcol
            v.to_rast(input=map, use='attr',
                   attrcolumn=factorcol,
                   output=factors['factorcol'])
            r.null(map=factors['factorcol'], setnull='0')
        
        print(factors.keys())
        if 'k' not in factors and 'factorcol' not in factors:
            gscript.fatal('Please provide either a factor raster map or a '
                          'valid vector map with a factor column (kfactor) '
                          'or factor component columns (Kb, Ks, Kh)')
        
        #if 'k' in factors and 'factorcol' in factors: 
    
        factornew = map.split('@')[0]+'.kfactor'
        if 'k' in factors and 'factorcol' in factors:
            r.patch(input=(factors['factorcol'],factors['k']),
                    output=factornew)
            
        elif 'k' in factors:
            g.copy(rast=(factors['k'],factornew))
            
        elif 'factorcol' in factors:
            g.copy(rast=(factors['factorcol'],factornew))

            
    if flag_p:
        #factorcorr = factorold + '.update'
        r.patch(input=(factornew,factorold), output=factornew)
        
    # rescale soil loss by the ratio of the new factor to the old one
    formula = soillossout + '=' + soillossin \
                + '/' + factorold  \
                + '*' + factornew
    r.mapcalc(formula)
            
    r.colors(map=soillossout, raster=soillossin)
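
# A compact sketch of the table-lookup pattern used above to derive
# K = Kb * Ks * Kh; the class boundaries below are hypothetical stand-ins
# for the real bodenarten/skelettgehalte/humusgehalte tables:
skelettgehalte_demo = [(10, 1.0), (25, 0.9), (50, 0.7), (101, 0.5)]  # (upper bound, Ks)
humusgehalte_demo = [(2, 1.0), (4, 0.9), (8, 0.8), (101, 0.7)]       # (upper bound, Kh)

def first_class_factor(value, table):
    # return the factor of the first class whose upper bound exceeds value
    for upper, factor in table:
        if value < upper:
            return factor
    return None

Ks = first_class_factor(18, skelettgehalte_demo)  # -> 0.9
Kh = first_class_factor(3, humusgehalte_demo)     # -> 0.9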
#sourcedir = '/media/awickert/Elements/Fluvial 2015/151109_MC_IW_01/Processed/'
#sourcedir = '/media/awickert/data3/TerraceExperiment/Fluvial 2015/151109_MC_IW_01/Processed/'
#sourcedirs = sorted(next(os.walk('/media/awickert/data3/TerraceExperiment/Fluvial 2015/'))[1])
#sourcedirs = sorted(glob.glob('/data3/TerraceExperiment/Forgotten/*/Processed/'))
sourcedirs = sorted(glob.glob('/data3/TerraceExperiment/Fluvial 2015/*/Processed/'))

length_y_trimmed = margin_top - margin_bottom
length_x_trimmed = margin_right - margin_left

g.region(w=margin_left/1000., e=margin_right/1000., s=margin_bottom/1000., n=margin_top/1000., res=0.001, flags='s')
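# note: the margins above are assumed to be in millimetres; dividing by 1000
# yields metres, matching the 0.001 m region resolution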

# Maps of x and y
g.region(w=0, s=0, e=int(np.floor(margin_right*1.5))/1000., n=int(np.floor(margin_top*1.5))/1000.)
try:
  r.mapcalc('x = x()')
  r.mapcalc('y = y()')
except:
  # the x and y maps may already exist; ignore the failure
  pass
g.region(flags='d')

errordirs = []
errorfiles = []
for sourcedir in sourcedirs:
  DATpaths = sorted(glob.glob(sourcedir+'*.DAT'))
  for DATpath in DATpaths:
    # Name
    DATfile = os.path.split(DATpath)[-1]
    scanName, scanNumber = DATfile.split('_Composite')[0].split('_Scan')
    scanNameDEM_fullsize = scanName+'__DEM_full__'+scanNumber
    scanNameDEM = scanName+'__DEM__'+scanNumber
def main(opts, flgs):
    pid = os.getpid()
    pat = "tmprgreen_%i_*" % pid
    DEBUG = False
    #FIXME: debug from flag
    atexit.register(cleanup,
                    pattern=pat,
                    debug=DEBUG)

    forest = opts['forest']

    forest_roads = opts['forest_roads']
    main_roads = opts['main_roads']

    ######## start import and convert ########
    ll = [x for x in opts.keys() if sel_columns(x, 'forest_column')]
    for key in ll:
        try:
            run_command("v.to.rast",
                        input=forest,
                        output=('tmprgreen_%i_%s' % (pid, key[14:])),
                        use="attr",
                        attrcolumn=opts[key], overwrite=True)
            #FIXME: suppress the ERROR message
            run_command("r.null", map=('tmprgreen_%i_%s' % (pid, key[14:])),
                        null=0)
        except Exception:
            warning('no column %s selected, values set to 0' % key)
            run_command("r.mapcalc", overwrite=True,
                        expression=('%s=0' % 'tmprgreen_%i_%s'
                                    % (pid, key[14:])))

    run_command("v.to.rast", input=forest_roads,
                output=('tmprgreen_%i_forest_roads' % pid),
                use="val", overwrite=True)
    run_command("v.to.rast", input=main_roads,
                output=('tmprgreen_%i_main_roads' % pid),
                use="val", overwrite=True)
# FIXME: yield surface can be computed by the code; plan surface or real?
# FIXME: this map can be created here
    yield_pix = 'tmprgreen_%i_yield_pix' % pid
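    # per-pixel yield: the total yield divided by the yield surface, scaled
    # by the cell area in hectares, (ewres() * nsres()) / 10000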
    expr = ("{pix} = {yield_}/{yield_surface}*"
            "((ewres()*nsres())/10000)")
    r.mapcalc(expr.format(pix=yield_pix,
                          yield_=('tmprgreen_%i_yield' % pid),
                          yield_surface='tmprgreen_%i_yield_surface' % pid),
              overwrite=True)
    # TODO: add r.null
    ######## end import and convert ########
    dic = {'tree_diam': 35, 'tree_vol': 3, 'soilp2_map': 0.7}
    for key, val in dic.items():
        if not opts[key]:
            warning("No %s map, value set to %f" % (key, val))
            output = 'tmprgreen_%i_%s' % (pid, key)
            run_command("r.mapcalc", overwrite=True,
                        expression=('%s=%f' % (output, val)))
    # create combination maps to avoid if construction
    m1t1, m1t2, m1, m2, not2 = combination('tmprgreen_%i_management' % pid,
                                           'tmprgreen_%i_treatment' % pid)

    slope_computation(opts['elevation'])

    if (opts['technical_bioenergy'] and opts['tech_bioc']
        and opts['tech_biohf']):
            technical_bioenergy = opts['technical_bioenergy']
            tech_bioC = opts['tech_bioc']
            tech_bioHF = opts['tech_biohf']
            technical_surface = 'tmprgreen_%i_technical_surface' % pid
            expr = "{technical_surface} = if({technical_bioenergy}, 1, 0)"
            r.mapcalc(expr.format(technical_surface=technical_surface,
                                  technical_bioenergy=technical_bioenergy),
                      overwrite=True)

    else:
        #FIXME: call directly the biomassfor.technical module
        out = yield_pix_process(opts=opts, vector_forest=forest,
                                yield_=('tmprgreen_%i_yield' % pid),
                                yield_surface=('tmprgreen_%i_yield_surface' % pid),
                                rivers=opts['rivers'],
                                lakes=opts['lakes'],
                                forest_roads=('tmprgreen_%i_forest_roads' % pid),
                                m1t1=m1t1, m1t2=m1t2, m1=m1, m2=m2,
                                roughness=('tmprgreen_%i_roughness' % pid))
        technical_bioenergy, tech_bioC, tech_bioHF = out

    total_revenues = revenues(opts=opts,
                              yield_surface=('tmprgreen_%i_yield_surface'
                                             % pid),
                              m1t1=m1t1, m1t2=m1t2, m1=m1, m2=m2,
                              forest=forest,
                              yield_=('tmprgreen_%i_yield' % pid),
                              technical_bioenergy=technical_bioenergy)

    dic1, dic2 = productivity(opts=opts,
                              m1t1=m1t1, m1t2=m1t2, m1=m1, m2=m2, not2=not2,
                              soilp2_map=('tmprgreen_%i_soilp2_map' % pid),
                              tree_diam=('tmprgreen_%i_tree_diam' % pid),
                              tree_vol=('tmprgreen_%i_tree_vol' % pid),
                              forest_roads=('tmprgreen_%i_forest_roads' % pid),
                              main_roads=('tmprgreen_%i_main_roads' % pid))
    total_costs = costs(opts, total_revenues=total_revenues,
                        dic1=dic1, dic2=dic2, yield_pix="yield_pix1")
    net_revenues(opts=opts,
                 total_revenues=total_revenues,
                 technical_bioenergy=technical_bioenergy,
                 tech_bioC=tech_bioC, tech_bioHF=tech_bioHF,
                 total_costs=total_costs)
Exemplo n.º 33
0
def main():

    elevation = options['elevation']
    slope = options['slope']
    flat_thres = float(options['flat_thres'])
    curv_thres = float(options['curv_thres'])
    filter_size = int(options['filter_size'])
    counting_size = int(options['counting_size'])
    nclasses = int(options['classes'])
    texture = options['texture']
    convexity = options['convexity']
    concavity = options['concavity']
    features = options['features']

    # remove mapset from output name in case of overwriting existing map
    texture = texture.split('@')[0]
    convexity = convexity.split('@')[0]
    concavity = concavity.split('@')[0]
    features = features.split('@')[0]

    # store current region settings
    global current_reg
    current_reg = parse_key_val(g.region(flags='pg', stdout_=PIPE).outputs.stdout)
    del current_reg['projection']
    del current_reg['zone']
    del current_reg['cells']

    # check for existing mask and backup if found
    global mask_test
    mask_test = gs.list_grouped(
        type='rast', pattern='MASK')[gs.gisenv()['MAPSET']]
    if mask_test:
        global original_mask
        original_mask = temp_map('tmp_original_mask')
        g.copy(raster=['MASK', original_mask])

    # error checking
    if flat_thres < 0:
        gs.fatal('Parameter thres cannot be negative')

    if filter_size % 2 == 0 or counting_size % 2 == 0:
        gs.fatal(
            'Filter or counting windows require an odd-numbered window size')

    if filter_size >= counting_size:
        gs.fatal(
            'Filter size needs to be smaller than the counting window size')
    
    if features != '' and slope == '':
        gs.fatal('Need to supply a slope raster in order to produce the '
                 'terrain classification')
                
    # Terrain Surface Texture -------------------------------------------------
    # smooth the dem
    gs.message("Calculating terrain surface texture...")
    gs.message(
        "1. Smoothing input DEM with a {n}x{n} median filter...".format(
            n=filter_size))
    filtered_dem = temp_map('tmp_filtered_dem')
    gs.run_command("r.neighbors", input = elevation, method = "median",
                    size = filter_size, output = filtered_dem, flags='c',
                    quiet=True)

    # extract the pits and peaks based on the threshold
    pitpeaks = temp_map('tmp_pitpeaks')
    gs.message("2. Extracting pits and peaks with difference > thres...")
    r.mapcalc(expression='{x} = if ( abs({dem}-{median})>{thres}, 1, 0)'.format(
                x=pitpeaks, dem=elevation, thres=flat_thres, median=filtered_dem),
                quiet=True)

    # calculate density of pits and peaks
    gs.message("3. Using resampling filter to create terrain texture...")
    window_radius = (counting_size-1)/2
    # radii must follow the region resolution: n-s for y, e-w for x
    y_radius = float(current_reg['nsres'])*window_radius
    x_radius = float(current_reg['ewres'])*window_radius
    resample = temp_map('tmp_density')
    r.resamp_filter(input=pitpeaks, output=resample, filter=['bartlett','gauss'],
                    radius=[x_radius,y_radius], quiet=True)

    # convert to percentage
    gs.message("4. Converting to percentage...")
    r.mask(raster=elevation, overwrite=True, quiet=True)
    r.mapcalc(expression='{x} = float({y} * 100)'.format(x=texture, y=resample),
               quiet=True)
    r.mask(flags='r', quiet=True)
    r.colors(map=texture, color='haxby', quiet=True)

    # Terrain convexity/concavity ---------------------------------------------
    # surface curvature using laplacian filter
    gs.message("Calculating terrain convexity and concavity...")
    gs.message("1. Calculating terrain curvature using laplacian filter...")
    
    # grow the map to remove border effects and run laplacian filter
    dem_grown = temp_map('tmp_elevation_grown')
    laplacian = temp_map('tmp_laplacian')
    g.region(n=float(current_reg['n']) + (float(current_reg['nsres']) * filter_size),
             s=float(current_reg['s']) - (float(current_reg['nsres']) * filter_size),
             w=float(current_reg['w']) - (float(current_reg['ewres']) * filter_size),
             e=float(current_reg['e']) + (float(current_reg['ewres']) * filter_size))

    r.grow(input=elevation, output=dem_grown, radius=filter_size, quiet=True)
    r.mfilter(
        input=dem_grown, output=laplacian,
        filter=string_to_rules(laplacian_matrix(filter_size)), quiet=True)

    # extract convex and concave pixels
    gs.message("2. Extracting convexities and concavities...")
    convexities = temp_map('tmp_convexities')
    concavities = temp_map('tmp_concavities')

    r.mapcalc(
        expression='{x} = if({laplacian}>{thres}, 1, 0)'\
        .format(x=convexities, laplacian=laplacian, thres=curv_thres),
        quiet=True)
    r.mapcalc(
        expression='{x} = if({laplacian}<-{thres}, 1, 0)'\
        .format(x=concavities, laplacian=laplacian, thres=curv_thres),
        quiet=True)

    # calculate density of convexities and concavities
    gs.message("3. Using resampling filter to create surface convexity/concavity...")
    resample_convex = temp_map('tmp_convex')
    resample_concav = temp_map('tmp_concav')
    r.resamp_filter(input=convexities, output=resample_convex,
                    filter=['bartlett','gauss'], radius=[x_radius,y_radius],
                    quiet=True)
    r.resamp_filter(input=concavities, output=resample_concav,
                    filter=['bartlett','gauss'], radius=[x_radius,y_radius],
                    quiet=True)

    # convert to percentages
    gs.message("4. Converting to percentages...")
    g.region(**current_reg)
    r.mask(raster=elevation, overwrite=True, quiet=True)
    r.mapcalc(expression='{x} = float({y} * 100)'.format(x=convexity, y=resample_convex),
               quiet=True)
    r.mapcalc(expression='{x} = float({y} * 100)'.format(x=concavity, y=resample_concav),
               quiet=True)
    r.mask(flags='r', quiet=True)

    # set colors
    r.colors_stddev(map=convexity, quiet=True)
    r.colors_stddev(map=concavity, quiet=True)

    # Terrain classification Flowchart-----------------------------------------
    if features != '':
        gs.message("Performing terrain surface classification...")
        # level 1 produces classes 1 thru 8
        # level 2 produces classes 5 thru 12
        # level 3 produces classes 9 thru 16
        if nclasses == 8:
            levels = 1
        elif nclasses == 12:
            levels = 2
        elif nclasses == 16:
            levels = 3

        classif = []
        for level in range(levels):
            # mask previous classes x:x+4
            if level != 0:
                min_cla = (4*(level+1))-4
                clf_msk = temp_map('tmp_clf_mask')
                rules = '1:{0}:1'.format(min_cla)
                r.recode(
                    input=classif[level-1], output=clf_msk,
                    rules=string_to_rules(rules), overwrite=True)
                r.mask(raster=clf_msk, flags='i', quiet=True, overwrite=True)

            # image statistics
            smean = r.univar(
                map=slope, flags='g', stdout_=PIPE).outputs.stdout.split(os.linesep)
            smean = [i for i in smean if i.startswith('mean=')][0].split('=')[1]

            cmean = r.univar(
                map=convexity, flags='g', stdout_=PIPE).outputs.stdout.split(os.linesep)
            cmean = [i for i in cmean if i.startswith('mean=')][0].split('=')[1]

            tmean = r.univar(
                map=texture, flags='g', stdout_=PIPE).outputs.stdout.split(os.linesep)
            tmean = [i for i in tmean if i.startswith('mean=')][0].split('=')[1]
            classif.append(temp_map('tmp_classes'))
            
            if level != 0:
                r.mask(flags='r', quiet=True)

            classification(level+1, slope, smean, texture, tmean,
                            convexity, cmean, classif[level])

        # combine decision trees
        merged = []
        for level in range(0, levels):
            if level > 0:
                min_cla = (4*(level+1))-4
                merged.append(temp_map('tmp_merged'))
                r.mapcalc(
                    expression='{x} = if({a}>{min}, {b}, {a})'.format(
                        x=merged[level], min=min_cla, a=merged[level-1],  b=classif[level]))
            else:
                merged.append(classif[level])
        g.rename(raster=[merged[-1], features], quiet=True)
        del TMP_RAST[-1]

    # Write metadata ----------------------------------------------------------
    history = 'r.terrain.texture '
    for key, val in options.items():
        history += key + '=' + str(val) + ' '

    r.support(map=texture,
              title=texture,
              description='generated by r.terrain.texture',
              history=history)
    r.support(map=convexity,
              title=convexity,
              description='generated by r.terrain.texture',
              history=history)
    r.support(map=concavity,
              title=concavity,
              description='generated by r.terrain.texture',
              history=history)

    if features != '':
        r.support(map=features,
                  title=features,
                  description='generated by r.terrain.texture',
                  history=history)
        
        # write color and category rules to tempfiles                
        r.category(
            map=features,
            rules=string_to_rules(categories(nclasses)),
            separator='pipe')
        r.colors(
            map=features, rules=string_to_rules(colors(nclasses)), quiet=True)

    return 0
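
# A small numpy sketch (hypothetical class values) of the decision-tree merge
# performed above: cells of the previous level that exceed the level's class
# offset are replaced by the next level's classes:
import numpy as np

min_cla = 4                          # offset for level 2, i.e. (4 * 2) - 4
previous = np.array([1, 3, 6, 8])    # level-1 classes
current = np.array([5, 7, 9, 12])    # level-2 classes

# same rule as the r.mapcalc expression if(a > min, b, a)
merged = np.where(previous > min_cla, current, previous)
print(merged)  # -> [ 1  3  9 12]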
  print(i, comm_code[i], years[i])
    
  # select feature
  v.extract(input = vector, output = 'vector_cat', where = 'cat = ' + str(i+1), 
    flags = 't', overwrite = True, quiet = True)
  # define region
  g.region(vector = 'vector_cat', align = map_for_define_region, flags = 'p')
  # use vector as a mask
  r.mask(vector = 'vector_cat', overwrite = True, quiet = True)
        
  # Cut maps
  
  # tree cover with zero where there was deforestation
  expr = comm_code[i] + '_treecover_GFW_2000_deforestation = if(Neotropical_Hansen_treecoverlossperyear_wgs84_2017@PERMANENT > 0 && '+ \
  'Neotropical_Hansen_treecoverlossperyear_wgs84_2017@PERMANENT < ' + str(years[i]) + ', 0, Neotropic_Hansen_percenttreecoverd_2000_wgs84@PERMANENT)'
  r.mapcalc(expr, overwrite = True)
  
  # thresholds for binary values of natural vegetation
  thresholds = [70, 80, 90]
    
  # loop to cut for each one and account for deforestation
  for tr in thresholds:
    
    # Hansen bin
    r.mapcalc(comm_code[i]+'_treecover_GFW_2000_deforestation_threshold'+str(tr)+'_binary = if('+comm_code[i]+'_treecover_GFW_2000_deforestation > '+str(tr)+', 1, 0)', 
      overwrite = True)
         
  # remove mask and vector_cat to avoid problems
  r.mask(flags = 'r')
  g.remove(type = 'vector', name = 'vector_cat', flags = 'f')
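
# A sketch (illustrative values) of the threshold loop above: each threshold
# produces a separate binary natural-vegetation map:
import numpy as np

treecover = np.array([45, 72, 88, 95])  # hypothetical % tree cover values
for tr in (70, 80, 90):
    binary = (treecover > tr).astype(int)
    print(tr, binary)
# 70 [0 1 1 1]
# 80 [0 0 1 1]
# 90 [0 0 0 1]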
Exemplo n.º 35
0
def main():
    """
    Builds a grid for the MODFLOW component of the USGS hydrologic model,
    GSFLOW.
    """

    options, flags = gscript.parser()
    basin = options['basin']
    pp = options['pour_point']
    raster_input = options['raster_input']
    dx = options['dx']
    dy = options['dy']
    grid = options['output']
    mask = options['mask_output']
    bc_cell = options['bc_cell']
    # basin='basins_tmp_onebasin'; pp='pp_tmp'; raster_input='DEM'; raster_output='DEM_coarse'; dx=dy='500'; grid='grid_tmp'; mask='mask_tmp'
    """
    # Fatal if raster input and output are not both set
    _lena0 = (len(raster_input) == 0)
    _lenb0 = (len(raster_output) == 0)
    if _lena0 + _lenb0 == 1:
        gscript.fatal("You must set both raster input and output, or neither.")
    """

    # Fatal if bc_cell set but mask and grid are false
    if bc_cell != '':
        if (mask == '') or (pp == ''):
            gscript.fatal(
                'Mask and pour point must be set to define b.c. cell')

    # Create grid -- overlaps DEM, three cells of padding
    gscript.use_temp_region()
    reg = gscript.region()
    reg_grid_edges_sn = np.linspace(reg['s'], reg['n'], reg['rows'])
    reg_grid_edges_we = np.linspace(reg['w'], reg['e'], reg['cols'])
    g.region(vector=basin, ewres=dx, nsres=dy)
    regnew = gscript.region()
    # Use a grid ratio -- don't match exactly the desired MODFLOW resolution
    grid_ratio_ns = np.round(regnew['nsres'] / reg['nsres'])
    grid_ratio_ew = np.round(regnew['ewres'] / reg['ewres'])
    # Get S and W, then move an integer number of grid cells over to get N and E,
    # and include 3 cells of padding around the whole watershed
    _s_dist = np.abs(reg_grid_edges_sn - (regnew['s'] - 3. * regnew['nsres']))
    _s_idx = np.where(_s_dist == np.min(_s_dist))[0][0]
    _s = float(reg_grid_edges_sn[_s_idx])
    _n_grid = np.arange(_s, reg['n'] + 3 * grid_ratio_ns * reg['nsres'],
                        grid_ratio_ns * reg['nsres'])
    _n_dist = np.abs(_n_grid - (regnew['n'] + 3. * regnew['nsres']))
    _n_idx = np.where(_n_dist == np.min(_n_dist))[0][0]
    _n = float(_n_grid[_n_idx])
    _w_dist = np.abs(reg_grid_edges_we - (regnew['w'] - 3. * regnew['ewres']))
    _w_idx = np.where(_w_dist == np.min(_w_dist))[0][0]
    _w = float(reg_grid_edges_we[_w_idx])
    _e_grid = np.arange(_w, reg['e'] + 3 * grid_ratio_ew * reg['ewres'],
                        grid_ratio_ew * reg['ewres'])
    _e_dist = np.abs(_e_grid - (regnew['e'] + 3. * regnew['ewres']))
    _e_idx = np.where(_e_dist == np.min(_e_dist))[0][0]
    _e = float(_e_grid[_e_idx])
    # Finally make the region
    g.region(w=str(_w),
             e=str(_e),
             s=str(_s),
             n=str(_n),
             nsres=str(grid_ratio_ns * reg['nsres']),
             ewres=str(grid_ratio_ew * reg['ewres']))
    # And then make the grid
    v.mkgrid(map=grid, overwrite=gscript.overwrite())

    # Cell numbers (row, column, continuous ID)
    v.db_addcolumn(map=grid, columns='id int', quiet=True)
    colNames = np.array(gscript.vector_db_select(grid, layer=1)['columns'])
    colValues = np.array(
        list(gscript.vector_db_select(grid, layer=1)['values'].values()))
    cats = colValues[:, colNames == 'cat'].astype(int).squeeze()
    rows = colValues[:, colNames == 'row'].astype(int).squeeze()
    cols = colValues[:, colNames == 'col'].astype(int).squeeze()
    nrows = np.max(rows)
    ncols = np.max(cols)
    cats = np.ravel([cats])
    _id = np.ravel([ncols * (rows - 1) + cols])
    _id_cat = []
    for i in range(len(_id)):
        _id_cat.append((_id[i], cats[i]))
    gridTopo = VectorTopo(grid)
    gridTopo.open('rw')
    cur = gridTopo.table.conn.cursor()
    cur.executemany("update " + grid + " set id=? where cat=?", _id_cat)
    gridTopo.table.conn.commit()
    gridTopo.close()

    # Cell area
    v.db_addcolumn(map=grid, columns='area_m2', quiet=True)
    v.to_db(map=grid,
            option='area',
            units='meters',
            columns='area_m2',
            quiet=True)

    # Basin mask
    if len(mask) > 0:
        # Fine resolution region:
        g.region(n=reg['n'],
                 s=reg['s'],
                 w=reg['w'],
                 e=reg['e'],
                 nsres=reg['nsres'],
                 ewres=reg['ewres'])
        # Rasterize basin
        v.to_rast(input=basin,
                  output=mask,
                  use='val',
                  value=1,
                  overwrite=gscript.overwrite(),
                  quiet=True)
        # Coarse resolution region:
        g.region(w=str(_w),
                 e=str(_e),
                 s=str(_s),
                 n=str(_n),
                 nsres=str(grid_ratio_ns * reg['nsres']),
                 ewres=str(grid_ratio_ew * reg['ewres']))
        r.resamp_stats(input=mask,
                       output=mask,
                       method='sum',
                       overwrite=True,
                       quiet=True)
        r.mapcalc('tmp' + ' = ' + mask + ' > 0', overwrite=True, quiet=True)
        g.rename(raster=('tmp', mask), overwrite=True, quiet=True)
        r.null(map=mask, null=0, quiet=True)
        # Add mask location (1 vs 0) in the MODFLOW grid
        v.db_addcolumn(map=grid,
                       columns='basinmask double precision',
                       quiet=True)
        v.what_rast(map=grid, type='centroid', raster=mask, column='basinmask')
    """
    # Resampled raster
    if len(raster_output) > 0:
        r.resamp_stats(input=raster_input, output=raster_output, method='average', overwrite=gscript.overwrite(), quiet=True)
    """

    # Pour point
    if len(pp) > 0:
        v.db_addcolumn(map=pp,
                       columns=('row integer', 'col integer'),
                       quiet=True)
        v.build(map=pp, quiet=True)
        v.what_vect(map=pp,
                    query_map=grid,
                    column='row',
                    query_column='row',
                    quiet=True)
        v.what_vect(map=pp,
                    query_map=grid,
                    column='col',
                    query_column='col',
                    quiet=True)

    # Next point downstream of the pour point
    # Requires pp (always) and mask (sometimes)
    # Dependency set above w/ gscript.fatal
    if len(bc_cell) > 0:
        ########## NEED TO USE TRUE TEMPORARY FILE ##########
        # May not work with dx != dy!
        v.to_rast(input=pp, output='tmp', use='val', value=1, overwrite=True)
        r.buffer(input='tmp',
                 output='tmp',
                 distances=float(dx) * 1.5,
                 overwrite=True)
        r.mapcalc('tmp2 = if(tmp==2,1,null()) * ' + raster_input,
                  overwrite=True)
        g.rename(raster=('tmp2', 'tmp'), overwrite=True, quiet=True)
        #r.mapcalc('tmp = if(isnull('+raster_input+',0,(tmp == 2)))', overwrite=True)
        #g.region(rast='tmp')
        #r.null(map=raster_input,
        r.drain(input=raster_input,
                start_points=pp,
                output='tmp2',
                overwrite=True)
        r.mapcalc('tmp3 = tmp2 * tmp', overwrite=True, quiet=True)
        g.rename(raster=('tmp3', 'tmp'), overwrite=True, quiet=True)
        #r.null(map='tmp', setnull=0) # Not necessary: center point removed above
        r.to_vect(input='tmp',
                  output=bc_cell,
                  type='point',
                  column='z',
                  overwrite=gscript.overwrite(),
                  quiet=True)
        v.db_addcolumn(map=bc_cell,
                       columns=('row integer', 'col integer',
                                'x double precision', 'y double precision'),
                       quiet=True)
        v.build(map=bc_cell, quiet=True)
        v.what_vect(map=bc_cell, query_map=grid, column='row', \
                    query_column='row', quiet=True)
        v.what_vect(map=bc_cell, query_map=grid, column='col', \
                    query_column='col', quiet=True)
        v.to_db(map=bc_cell, option='coor', columns=('x,y'))

        # Find out if this is diagonal: finite difference works only N-S, W-E
        colNames = np.array(gscript.vector_db_select(pp, layer=1)['columns'])
        colValues = np.array(
            list(gscript.vector_db_select(pp, layer=1)['values'].values()))
        pp_row = int(colValues[:, colNames == 'row'].astype(int).squeeze())
        pp_col = int(colValues[:, colNames == 'col'].astype(int).squeeze())
        colNames = np.array(
            gscript.vector_db_select(bc_cell, layer=1)['columns'])
        colValues = np.array(
            list(gscript.vector_db_select(bc_cell, layer=1)['values'].values()))
        bc_row = int(colValues[:, colNames == 'row'].astype(int).squeeze())
        bc_col = int(colValues[:, colNames == 'col'].astype(int).squeeze())
        # Also get x and y while we are at it: may be needed later
        bc_x = float(colValues[:, colNames == 'x'].astype(float).squeeze())
        bc_y = float(colValues[:, colNames == 'y'].astype(float).squeeze())
        if (bc_row != pp_row) and (bc_col != pp_col):
            # If diagonal, there are two possible cells that are
            # axis-adjacent to the pour point
            _col1, _row1 = str(bc_col), str(pp_row)
            _col2, _row2 = str(pp_col), str(bc_row)
            # Check if either of these is covered by the basin mask
            _ismask_1 = gscript.vector_db_select(grid,
                                                 layer=1,
                                                 where='(row == ' + _row1 +
                                                 ') AND (col ==' + _col1 + ')',
                                                 columns='basinmask')
            _ismask_1 = int(list(_ismask_1['values'].values())[0][0])
            _ismask_2 = gscript.vector_db_select(grid,
                                                 layer=1,
                                                 where='(row == ' + _row2 +
                                                 ') AND (col ==' + _col2 + ')',
                                                 columns='basinmask')
            _ismask_2 = int(list(_ismask_2['values'].values())[0][0])
            # If both covered by mask, error
            if _ismask_1 and _ismask_2:
                gscript.fatal(
                    'All possible b.c. cells covered by basin mask.\n\
                             Contact the developer: awickert (at) umn(.)edu')
            # Otherwise, keep the candidate cell(s) not covered by the basin
            # mask. (Open question: should we prefer the point that touches
            # as few interior cells as possible? For now, set every uncovered
            # candidate and see what happens.)
            else:
                # Get dx and dy
                dx = gscript.region()['ewres']
                dy = gscript.region()['nsres']
                # Build tool to handle multiple b.c. cells?
                bcvect = vector.Vector(bc_cell)
                bcvect.open('rw')
                _cat_i = 2
                if not _ismask_1:
                    # _x should always be bc_x, but writing generalized code
                    _x = bc_x + dx * (int(_col1) - bc_col)  # col 1 at w edge
                    _y = bc_y - dy * (int(_row1) - bc_row)  # row 1 at n edge
                    point0 = Point(_x, _y)
                    bcvect.write(
                        point0,
                        cat=_cat_i,
                        attrs=(None, _row1, _col1, _x, _y),
                    )
                    bcvect.table.conn.commit()
                    _cat_i += 1
                if not _ismask_2:
                    # _y should always be bc_y, but writing generalized code
                    _x = bc_x + dx * (int(_col2) - bc_col)  # col 1 at w edge
                    _y = bc_y - dy * (int(_row2) - bc_row)  # row 1 at n edge
                    point0 = Point(_x, _y)
                    bcvect.write(
                        point0,
                        cat=_cat_i,
                        attrs=(None, _row2, _col2, _x, _y),
                    )
                    bcvect.table.conn.commit()
                # Build database table and vector geometry
                bcvect.build()
                bcvect.close()

    g.region(n=reg['n'],
             s=reg['s'],
             w=reg['w'],
             e=reg['e'],
             nsres=reg['nsres'],
             ewres=reg['ewres'])
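
# A sketch (illustrative cell indices) of the diagonal check above: when the
# drained cell lies diagonally from the pour point, the two axis-aligned
# neighbours become the candidate boundary-condition cells:
pp_row, pp_col = 10, 20   # pour point cell
bc_row, bc_col = 11, 21   # cell downstream of the pour point

if bc_row != pp_row and bc_col != pp_col:
    # finite differences work only N-S / W-E, so offer the two adjacent
    # cells that share a row or a column with the pour point
    candidates = [(pp_row, bc_col), (bc_row, pp_col)]
    print(candidates)  # -> [(10, 21), (11, 20)]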
Exemplo n.º 36
0
def compute_supply(
    base,
    recreation_spectrum,
    highest_spectrum,
    base_reclassification_rules,
    reclassified_base,
    reclassified_base_title,
    flow,
    flow_map_name,
    aggregation,
    ns_resolution,
    ew_resolution,
    print_only=False,
    flow_column_name=None,
    vector=None,
    supply_filename=None,
    use_filename=None,
):
    """
     Algorithmic description of the "Contribution of Ecosystem Types"

     # FIXME
     '''
     1   B ← {0, .., m-1}     : Set of aggregational boundaries
     2   T ← {0, .., n-1}     : Set of land cover types
     3   WE ← 0               : Set of weighted extents
     4   R ← 0                : Set of fractions
     5   F ← 0
     6   MASK ← HQR           : High Quality Recreation
     7   foreach b ∈ B do     : for each aggregational boundary 'b'
     8      RB ← 0
     9      foreach t ∈ T do  : for each land cover type 't'
     10         WEt ← Et * Wt : Weighted Extent = Extent(t) * Weight(t)
     11         WE ← WE ∪ {WEt} : Add to the set of Weighted Extents
     12     S ← ∑_{t ∈ T} WEt
     13     foreach t ∈ T do
     14        Rt ← WEt / S
     15        R ← R ∪ {Rt}
     16     RB ← RB ∪ {R}
     '''
     # FIXME

    Parameters
    ----------
    recreation_spectrum:
        Map scoring access to and quality of recreation

    highest_spectrum :
        Expected is a map of areas with highest recreational value (category 9
        as per the report ... )

    base :
        Base land types map for final zonal statistics. Specific to
        ESTIMAP's recreation mapping algorithm

    base_reclassification_rules :
        Reclassification rules for the input base map

    reclassified_base :
        Name for the reclassified base cover map

    reclassified_base_title :
        Title for the reclassified base map

    ecosystem_types :

    flow :
        Map of visits, derived from the mobility function, depicting the
        number of people living inside zones 0, 1, 2, 3. Used as a cover map
        for zonal statistics.

    flow_map_name :
        A name for the 'flow' map. This is required when the 'flow' input
        option is not defined by the user, yet some of the requested outputs
        required first the production of the 'flow' map. An example is the
        request for a supply table without requesting the 'flow' map itself.

    aggregation :

    ns_resolution :

    ew_resolution :

    statistics_filename :

    supply_filename :
        Name for CSV output file of the supply table

    use_filename :
        Name for CSV output file of the use table

    flow_column_name :
        Name for column to populate with 'flow' values

    vector :
        If 'vector' is given, a vector map of the 'flow' along with appropriate
        attributes will be produced.

    ? :
        Land cover class percentages in ROS9 (this is: relative percentage)

    output :
        Supply table (distribution of flow for each land cover class)

    Returns
    -------
    This function produces a map to base the production of a supply table in
    form of CSV.

    Examples
    --------
    """
    # Inputs
    flow_in_base = flow + "_" + base
    base_scores = base + ".scores"

    # Define lists and dictionaries to hold intermediate data
    statistics_dictionary = {}
    weighted_extents = {}
    flows = []

    # MASK areas of high quality recreation
    r.mask(raster=highest_spectrum, overwrite=True, quiet=True)

    # Reclassify land cover map to MAES ecosystem types
    r.reclass(
        input=base,
        rules=base_reclassification_rules,
        output=reclassified_base,
        quiet=True,
    )
    # add to "remove_at_exit" after the reclassified maps!

    # Discard areas out of MASK
    copy_equation = EQUATION.format(result=reclassified_base,
                                    expression=reclassified_base)
    r.mapcalc(copy_equation, overwrite=True)

    # Count flow within each land cover category
    r.stats_zonal(
        base=base,
        flags="r",
        cover=flow_map_name,
        method="sum",
        output=flow_in_base,
        overwrite=True,
        quiet=True,
    )

    # Set colors for "flow" map
    r.colors(map=flow_in_base, color=MOBILITY_COLORS, quiet=True)

    # Parse aggregation raster categories and labels
    categories = grass.parse_command("r.category",
                                     map=aggregation,
                                     delimiter="\t")

    for category in categories:

        # Intermediate names

        cells = highest_spectrum + ".cells" + "." + category
        remove_map_at_exit(cells)

        extent = highest_spectrum + ".extent" + "." + category
        remove_map_at_exit(extent)

        weighted = highest_spectrum + ".weighted" + "." + category
        remove_map_at_exit(weighted)

        fractions = base + ".fractions" + "." + category
        remove_map_at_exit(fractions)

        flow_category = "_flow_" + category
        flow = base + flow_category
        remove_map_at_exit(flow)

        flow_in_reclassified_base = reclassified_base + "_flow"
        flow_in_category = reclassified_base + flow_category
        flows.append(flow_in_category)  # add to list for patching
        remove_map_at_exit(flow_in_category)

        # Output names

        msg = "Processing aggregation raster category: {r}"
        msg = msg.format(r=category)
        grass.debug(_(msg))
        # g.message(_(msg))

        # First, set region to extent of the aggregation map
        # and resolution to the one of the population map
        # Note the `-a` flag to g.region: ?
        # To safely modify the region: grass.use_temp_region()  # FIXME
        g.region(
            raster=aggregation,
            nsres=ns_resolution,
            ewres=ew_resolution,
            flags="a",
            quiet=True,
        )

        msg = "|! Computational resolution matched to {raster}"
        msg = msg.format(raster=aggregation)
        grass.debug(_(msg))

        # Build MASK for current category & high quality recreation areas
        msg = "Setting category '{c}' of '{a}' as a MASK"
        grass.verbose(_(msg.format(c=category, a=aggregation)))

        masking = "if( {spectrum} == {highest_quality_category} && "
        masking += "{aggregation} == {category}, "
        masking += "1, null() )"
        masking = masking.format(
            spectrum=recreation_spectrum,
            highest_quality_category=HIGHEST_RECREATION_CATEGORY,
            aggregation=aggregation,
            category=category,
        )
        masking_equation = EQUATION.format(result="MASK", expression=masking)
        grass.mapcalc(masking_equation, overwrite=True)

        # zoom to MASK
        g.region(zoom="MASK",
                 nsres=ns_resolution,
                 ewres=ew_resolution,
                 quiet=True)

        # Count number of cells within each land category
        r.stats_zonal(
            flags="r",
            base=base,
            cover=highest_spectrum,
            method="count",
            output=cells,
            overwrite=True,
            quiet=True,
        )
        cells_categories = grass.parse_command("r.category",
                                               map=cells,
                                               delimiter="\t")
        grass.debug(_("Cells: {c}".format(c=cells_categories)))

        # Build cell category and label rules for `r.category`
        cells_rules = "\n".join([
            "{0}:{1}".format(key, value)
            for key, value in cells_categories.items()
        ])

        # Discard areas out of MASK
        copy_equation = EQUATION.format(result=cells, expression=cells)
        r.mapcalc(copy_equation, overwrite=True)

        # Reassign cell category labels
        r.category(map=cells, rules="-", stdin=cells_rules, separator=":")

        # Compute extent of each land category
        extent_expression = "@{cells} * area()"
        extent_expression = extent_expression.format(cells=cells)
        extent_equation = EQUATION.format(result=extent,
                                          expression=extent_expression)
        r.mapcalc(extent_equation, overwrite=True)

        # Write extent figures as labels
        r.stats_zonal(
            flags="r",
            base=base,
            cover=extent,
            method="average",
            output=extent,
            overwrite=True,
            verbose=False,
            quiet=True,
        )

        # Write land suitability scores as an ASCII file
        temporary_reclassified_base_map = temporary_filename(
            filename=reclassified_base)
        suitability_scores_as_labels = string_to_file(
            SUITABILITY_SCORES_LABELS,
            filename=temporary_reclassified_base_map)
        remove_files_at_exit(suitability_scores_as_labels)

        # Write scores as raster category labels
        r.reclass(
            input=base,
            output=base_scores,
            rules=suitability_scores_as_labels,
            overwrite=True,
            quiet=True,
            verbose=False,
        )
        remove_map_at_exit(base_scores)

        # Compute weighted extents
        weighted_expression = "@{extent} * float(@{scores})"
        weighted_expression = weighted_expression.format(extent=extent,
                                                         scores=base_scores)
        weighted_equation = EQUATION.format(result=weighted,
                                            expression=weighted_expression)
        r.mapcalc(weighted_equation, overwrite=True)

        # Write weighted extent figures as labels
        r.stats_zonal(
            flags="r",
            base=base,
            cover=weighted,
            method="average",
            output=weighted,
            overwrite=True,
            verbose=False,
            quiet=True,
        )

        # Get weighted extents in a dictionary
        weighted_extents = grass.parse_command("r.category",
                                               map=weighted,
                                               delimiter="\t")

        # Compute the sum of all weighted extents and add to dictionary
        category_sum = sum([
            float(x) if not math.isnan(float(x)) else 0
            for x in weighted_extents.values()
        ])
        weighted_extents["sum"] = category_sum

        # Create a map to hold fractions of each weighted extent to the sum
        # See also:
        # https://grasswiki.osgeo.org/wiki/LANDSAT#Hint:_Minimal_disk_space_copies
        r.reclass(
            input=base,
            output=fractions,
            rules="-",
            stdin="*=*",
            verbose=False,
            quiet=True,
        )

        # Compute weighted fractions of land types
        fraction_category_label = {
            key: float(value) / weighted_extents["sum"]
            for (key, value) in weighted_extents.items()
            if key != "sum"
        }

        # Build fraction category and label rules for `r.category`
        fraction_rules = "\n".join([
            "{0}:{1}".format(key, value)
            for key, value in fraction_category_label.items()
        ])

        # Set rules
        r.category(map=fractions,
                   rules="-",
                   stdin=fraction_rules,
                   separator=":")

        # Assert that sum of fractions is ~1
        fraction_categories = grass.parse_command("r.category",
                                                  map=fractions,
                                                  delimiter="\t")

        fractions_sum = sum([
            float(x) if not math.isnan(float(x)) else 0
            for x in fraction_categories.values()
        ])
        msg = "Fractions: {f}".format(f=fraction_categories)
        grass.debug(_(msg))

        # g.message(_("Sum: {:.17g}".format(fractions_sum)))
        assert abs(fractions_sum - 1) < 1.0e-6, "Sum of fractions is != 1"

        # Compute flow
        flow_expression = "@{fractions} * @{flow}"
        flow_expression = flow_expression.format(fractions=fractions,
                                                 flow=flow_in_base)
        flow_equation = EQUATION.format(result=flow,
                                        expression=flow_expression)
        r.mapcalc(flow_equation, overwrite=True)

        # Write flow figures as raster category labels
        r.stats_zonal(
            base=reclassified_base,
            flags="r",
            cover=flow,
            method="sum",
            output=flow_in_category,
            overwrite=True,
            verbose=False,
            quiet=True,
        )

        # Parse flow categories and labels
        flow_categories = grass.parse_command("r.category",
                                              map=flow_in_category,
                                              delimiter="\t")
        grass.debug(_("Flow: {c}".format(c=flow_categories)))

        # Build flow category and label rules for `r.category`
        flow_rules = "\n".join([
            "{0}:{1}".format(key, value)
            for key, value in flow_categories.items()
        ])

        # Discard areas out of MASK

        # Check here again!
        # Output patch of all flow maps?

        copy_equation = EQUATION.format(result=flow_in_category,
                                        expression=flow_in_category)
        r.mapcalc(copy_equation, overwrite=True)

        # Reassign cell category labels
        r.category(map=flow_in_category,
                   rules="-",
                   stdin=flow_rules,
                   separator=":")

        # Update title (build per category; do not mutate the base title
        # across loop iterations)
        category_title = reclassified_base_title + " " + category
        r.support(flow_in_category, title=category_title)

        # debugging
        # r.report(
        #     flags='hn',
        #     map=(flow_in_category),
        #     units=('k','c','p'),
        # )

        if print_only:
            r.stats(
                input=(flow_in_category),
                output="-",
                flags="nacpl",
                separator=COMMA,
                quiet=True,
            )

        if not print_only:

            if flow_column_name:
                flow_column_prefix = flow_column_name + category
            else:
                flow_column_name = "flow"
                flow_column_prefix = flow_column_name + category

            # Produce vector map(s)
            if vector:

                # The following is wrong

                # update_vector(vector=vector,
                #         raster=flow_in_category,
                #         methods=METHODS,
                #         column_prefix=flow_column_prefix)

                # What can be done?

                # Maybe update columns of an existing map from the columns of
                # the following vectorised raster map(s)
                # ?

                raster_to_vector(raster=flow_in_category,
                                 vector=flow_in_category,
                                 type="area")

            # get statistics
            dictionary = get_raster_statistics(
                map_one=aggregation,  # reclassified_base
                map_two=flow_in_category,
                separator="|",
                flags="nlcap",
            )

            # merge 'dictionary' with global 'statistics_dictionary'
            statistics_dictionary = merge_two_dictionaries(
                statistics_dictionary, dictionary)

        # It is important to remove the MASK!
        r.mask(flags="r", quiet=True)

    # FIXME

    # Add "reclassified_base" map to "remove_at_exit" here, so as to be after
    # all reclassified maps that derive from it

    # remove the map 'reclassified_base'
    # g.remove(flags='f', type='raster', name=reclassified_base, quiet=True)
    # remove_map_at_exit(reclassified_base)

    if not print_only:
        r.patch(flags="",
                input=flows,
                output=flow_in_reclassified_base,
                quiet=True)

        if vector:
            # Patch all flow vector maps in one
            v.patch(
                flags="e",
                input=flows,
                output=flow_in_reclassified_base,
                overwrite=True,
                quiet=True,
            )

        # export to csv
        if supply_filename:
            supply_filename += CSV_EXTENSION
            nested_dictionary_to_csv(supply_filename, statistics_dictionary)

        if use_filename:
            use_filename += CSV_EXTENSION
            uses = compile_use_table(statistics_dictionary)
            dictionary_to_csv(use_filename, uses)

    # Maybe return list of flow maps?  Requires unique flow map names
    return flows
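
# A hypothetical invocation of compute_supply(); the map and file names below
# are assumptions, not part of the module:
flows = compute_supply(
    base='corine_land_cover',
    recreation_spectrum='recreation_spectrum',
    highest_spectrum='highest_recreation_spectrum',
    base_reclassification_rules='corine_to_maes.rules',
    reclassified_base='maes_ecosystem_types',
    reclassified_base_title='MAES ecosystem types',
    flow='mobility',
    flow_map_name='mobility',
    aggregation='regions',
    ns_resolution=1000,
    ew_resolution=1000,
    supply_filename='supply',
    use_filename='use',
)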