def main(): """ Creates a hydrologically correct MODFLOW grid that inlcudes minimum DEM elevations for all stream cells and mean elevations everywhere else """ """ dem = 'DEM' grid = 'grid_tmp' streams = 'streams_tmp' streams_MODFLOW = 'streams_tmp_MODFLOW' DEM_MODFLOW = 'DEM_coarse' resolution = 500 """ options, flags = gscript.parser() dem = options['dem'] grid = options['grid'] streams = options['streams'] #resolution = float(options['resolution']) streams_MODFLOW = options['streams_modflow'] DEM_MODFLOW = options['dem_modflow'] # Get number of rows and columns colNames = np.array(gscript.vector_db_select(grid, layer=1)['columns']) colValues = np.array( gscript.vector_db_select(grid, layer=1)['values'].values()) cats = colValues[:, colNames == 'cat'].astype(int).squeeze() rows = colValues[:, colNames == 'row'].astype(int).squeeze() cols = colValues[:, colNames == 'col'].astype(int).squeeze() nRows = np.max(rows) nCols = np.max(cols) gscript.use_temp_region() # Set the region to capture only the channel g.region(raster=dem) v.to_rast(input=streams, output=streams_MODFLOW, use='val', value=1.0, type='line', overwrite=gscript.overwrite(), quiet=True) r.mapcalc('tmp' + " = " + streams_MODFLOW + " * " + dem, overwrite=True) g.rename(raster=('tmp', streams_MODFLOW), overwrite=True, quiet=True) g.region(vector=grid, rows=nRows, cols=nCols, quiet=True) r.resamp_stats(input=streams_MODFLOW, output=streams_MODFLOW, method='average', overwrite=gscript.overwrite(), quiet=True) r.resamp_stats(input=dem, output=DEM_MODFLOW, method='average', overwrite=gscript.overwrite(), quiet=True) r.patch(input=streams_MODFLOW + ',' + DEM_MODFLOW, output=DEM_MODFLOW, overwrite=True, quiet=True)
def main(): """ Creates a hydrologically correct MODFLOW grid that inlcudes minimum DEM elevations for all stream cells and mean elevations everywhere else """ """ dem = 'DEM' grid = 'grid_tmp' streams = 'streams_tmp' streams_MODFLOW = 'streams_tmp_MODFLOW' DEM_MODFLOW = 'DEM_coarse' resolution = 500 """ options, flags = gscript.parser() dem = options['dem'] grid = options['grid'] streams = options['streams'] #resolution = float(options['resolution']) streams_MODFLOW = options['streams_modflow'] DEM_MODFLOW = options['dem_modflow'] gscript.use_temp_region() # Set the region to capture only the channel g.region(raster=dem) v.to_rast(input=streams, output=streams_MODFLOW, use='val', value=1.0, type='line', overwrite=gscript.overwrite(), quiet=True) r.mapcalc('tmp' + " = " + streams_MODFLOW + " * " + dem, overwrite=True) g.rename(raster=('tmp', streams_MODFLOW), overwrite=True, quiet=True) g.region(raster=DEM_MODFLOW, quiet=True) print "ALTERED" r.resamp_stats(input=streams_MODFLOW, output=streams_MODFLOW, method='average', overwrite=gscript.overwrite(), quiet=True) r.resamp_stats(input=dem, output=DEM_MODFLOW, method='average', overwrite=gscript.overwrite(), quiet=True) r.patch(input=streams_MODFLOW + ',' + DEM_MODFLOW, output=DEM_MODFLOW, overwrite=True, quiet=True)
def main(): """ Creates a hydrologically correct MODFLOW grid that inlcudes minimum DEM elevations for all stream cells and mean elevations everywhere else """ """ dem = 'DEM' grid = 'grid_tmp' streams = 'streams_tmp' streams_MODFLOW = 'streams_tmp_MODFLOW' DEM_MODFLOW = 'DEM_coarse' resolution = 500 """ options, flags = gscript.parser() dem = options['dem'] grid = options['grid'] streams = options['streams'] resolution = float(options['resolution']) streams_MODFLOW = options['streams_modflow'] DEM_MODFLOW = options['dem_modflow'] gscript.use_temp_region() g.region(raster=dem) g.region(vector=grid) v.to_rast(input=streams, output=streams_MODFLOW, use='val', value=1.0, type='line', overwrite=gscript.overwrite(), quiet=True) r.mapcalc(streams_MODFLOW + " = " + streams_MODFLOW + " * DEM", overwrite=True) g.region(res=resolution, quiet=True) r.resamp_stats(input=streams_MODFLOW, output=streams_MODFLOW, method='minimum', overwrite=gscript.overwrite(), quiet=True) r.resamp_stats(input=dem, output=DEM_MODFLOW, method='average', overwrite=gscript.overwrite(), quiet=True) r.patch(input=streams_MODFLOW + ',' + DEM_MODFLOW, output=DEM_MODFLOW, overwrite=True, quiet=True)
def compute_supply(
    base,
    recreation_spectrum,
    highest_spectrum,
    base_reclassification_rules,
    reclassified_base,
    reclassified_base_title,
    flow,
    flow_map_name,
    aggregation,
    ns_resolution,
    ew_resolution,
    print_only=False,
    flow_column_name=None,
    vector=None,
    supply_filename=None,
    use_filename=None,
):
    """
    Algorithmic description of the "Contribution of Ecosystem Types"

    # FIXME
    '''
    1   B ← {0, .., m-1}      : Set of aggregational boundaries
    2   T ← {0, .., n-1}      : Set of land cover types
    3   WE ← 0                : Set of weighted extents
    4   R ← 0                 : Set of fractions
    5   F ← 0
    6   MASK ← HQR            : High Quality Recreation
    7   foreach {b} ⊆ B do    : for each aggregational boundary 'b'
    8       RB ← 0
    9       foreach {t} ⊆ T do      : for each Land Type
    10          WEt ← Et * Wt       : Weighted Extent = Extent(t) * Weight(t)
    11          WE ← WE ⋃ {WEt}     : Add to set of Weighted Extents
    12      S ← ∑ WEt over t ∈ T
    13      foreach t ← T do
    14          Rt ← WEt / ∑ WE
    15          R ← R ⋃ {Rt}
    16      RB ← RB ⋃ {R}
    '''
    # FIXME

    Parameters
    ----------
    recreation_spectrum :
        Map scoring access to and quality of recreation

    highest_spectrum :
        Expected to be a map of areas with the highest recreational value
        (category 9 as per the report ...)

    base :
        Base land types map for final zonal statistics, specific to
        ESTIMAP's recreation mapping algorithm

    base_reclassification_rules :
        Reclassification rules for the input base map

    reclassified_base :
        Name for the reclassified base cover map

    reclassified_base_title :
        Title for the reclassified base map

    ecosystem_types :

    flow :
        Map of visits, derived from the mobility function, depicting the
        number of people living inside zones 0, 1, 2, 3. Used as a cover map
        for zonal statistics.

    flow_map_name :
        A name for the 'flow' map. This is required when the 'flow' input
        option is not defined by the user, yet some of the requested outputs
        require the 'flow' map to be produced first. An example is the
        request for a supply table without requesting the 'flow' map itself.

    aggregation :

    ns_resolution :

    ew_resolution :

    statistics_filename :

    supply_filename :
        Name for CSV output file of the supply table

    use_filename :
        Name for CSV output file of the use table

    flow_column_name :
        Name for column to populate with 'flow' values

    vector :
        If 'vector' is given, a vector map of the 'flow' along with
        appropriate attributes will be produced.

    ? :
        Land cover class percentages in ROS9 (this is: relative percentage)

    output :
        Supply table (distribution of flow for each land cover class)

    Returns
    -------
    This function produces a map on which to base the production of a supply
    table in the form of a CSV file.

    Examples
    --------
    """
    # Inputs
    flow_in_base = flow + "_" + base
    base_scores = base + ".scores"

    # Define lists and dictionaries to hold intermediate data
    statistics_dictionary = {}
    weighted_extents = {}
    flows = []

    # MASK areas of high quality recreation
    r.mask(raster=highest_spectrum, overwrite=True, quiet=True)

    # Reclassify land cover map to MAES ecosystem types
    r.reclass(
        input=base,
        rules=base_reclassification_rules,
        output=reclassified_base,
        quiet=True,
    )
    # add to "remove_at_exit" after the reclassified maps!

    # Discard areas out of MASK
    copy_equation = EQUATION.format(result=reclassified_base,
                                    expression=reclassified_base)
    r.mapcalc(copy_equation, overwrite=True)

    # Count flow within each land cover category
    r.stats_zonal(
        base=base,
        flags="r",
        cover=flow_map_name,
        method="sum",
        output=flow_in_base,
        overwrite=True,
        quiet=True,
    )

    # Set colors for "flow" map
    r.colors(map=flow_in_base, color=MOBILITY_COLORS, quiet=True)

    # Parse aggregation raster categories and labels
    categories = grass.parse_command("r.category", map=aggregation,
                                     delimiter="\t")

    for category in categories:

        # Intermediate names
        cells = highest_spectrum + ".cells" + "." + category
        remove_map_at_exit(cells)

        extent = highest_spectrum + ".extent" + "." + category
        remove_map_at_exit(extent)

        weighted = highest_spectrum + ".weighted" + "." + category
        remove_map_at_exit(weighted)

        fractions = base + ".fractions" + "." + category
        remove_map_at_exit(fractions)

        flow_category = "_flow_" + category
        flow = base + flow_category
        remove_map_at_exit(flow)

        flow_in_reclassified_base = reclassified_base + "_flow"
        flow_in_category = reclassified_base + flow_category
        flows.append(flow_in_category)  # add to list for patching
        remove_map_at_exit(flow_in_category)

        # Output names
        msg = "Processing aggregation raster category: {r}"
        msg = msg.format(r=category)
        grass.debug(_(msg))
        # g.message(_(msg))

        # First, set region to extent of the aggregation map
        # and resolution to the one of the population map
        # Note the `-a` flag to g.region: ?
        # To safely modify the region: grass.use_temp_region()  # FIXME
        g.region(
            raster=aggregation,
            nsres=ns_resolution,
            ewres=ew_resolution,
            flags="a",
            quiet=True,
        )
        msg = "|! Computational resolution matched to {raster}"
        msg = msg.format(raster=aggregation)
        grass.debug(_(msg))

        # Build MASK for current category & high quality recreation areas
        msg = "Setting category '{c}' of '{a}' as a MASK"
        grass.verbose(_(msg.format(c=category, a=aggregation)))

        masking = "if( {spectrum} == {highest_quality_category} && "
        masking += "{aggregation} == {category}, "
        masking += "1, null() )"
        masking = masking.format(
            spectrum=recreation_spectrum,
            highest_quality_category=HIGHEST_RECREATION_CATEGORY,
            aggregation=aggregation,
            category=category,
        )
        masking_equation = EQUATION.format(result="MASK", expression=masking)
        grass.mapcalc(masking_equation, overwrite=True)

        # zoom to MASK
        g.region(zoom="MASK", nsres=ns_resolution, ewres=ew_resolution,
                 quiet=True)

        # Count number of cells within each land category
        r.stats_zonal(
            flags="r",
            base=base,
            cover=highest_spectrum,
            method="count",
            output=cells,
            overwrite=True,
            quiet=True,
        )
        cells_categories = grass.parse_command("r.category", map=cells,
                                               delimiter="\t")
        grass.debug(_("Cells: {c}".format(c=cells_categories)))

        # Build cell category and label rules for `r.category`
        cells_rules = "\n".join(
            ["{0}:{1}".format(key, value)
             for key, value in cells_categories.items()]
        )

        # Discard areas out of MASK
        copy_equation = EQUATION.format(result=cells, expression=cells)
        r.mapcalc(copy_equation, overwrite=True)

        # Reassign cell category labels
        r.category(map=cells, rules="-", stdin=cells_rules, separator=":")

        # Compute extent of each land category
        extent_expression = "@{cells} * area()"
        extent_expression = extent_expression.format(cells=cells)
        extent_equation = EQUATION.format(result=extent,
                                          expression=extent_expression)
        r.mapcalc(extent_equation, overwrite=True)

        # Write extent figures as labels
        r.stats_zonal(
            flags="r",
            base=base,
            cover=extent,
            method="average",
            output=extent,
            overwrite=True,
            verbose=False,
            quiet=True,
        )

        # Write land suitability scores as an ASCII file
        temporary_reclassified_base_map = temporary_filename(
            filename=reclassified_base)
        suitability_scores_as_labels = string_to_file(
            SUITABILITY_SCORES_LABELS,
            filename=temporary_reclassified_base_map)
        remove_files_at_exit(suitability_scores_as_labels)

        # Write scores as raster category labels
        r.reclass(
            input=base,
            output=base_scores,
            rules=suitability_scores_as_labels,
            overwrite=True,
            quiet=True,
            verbose=False,
        )
        remove_map_at_exit(base_scores)

        # Compute weighted extents
        weighted_expression = "@{extent} * float(@{scores})"
        weighted_expression = weighted_expression.format(extent=extent,
                                                         scores=base_scores)
        weighted_equation = EQUATION.format(result=weighted,
                                            expression=weighted_expression)
        r.mapcalc(weighted_equation, overwrite=True)

        # Write weighted extent figures as labels
        r.stats_zonal(
            flags="r",
            base=base,
            cover=weighted,
            method="average",
            output=weighted,
            overwrite=True,
            verbose=False,
            quiet=True,
        )

        # Get weighted extents in a dictionary
        weighted_extents = grass.parse_command("r.category", map=weighted,
                                               delimiter="\t")

        # Compute the sum of all weighted extents and add to dictionary
        category_sum = sum(
            [float(x) if not math.isnan(float(x)) else 0
             for x in weighted_extents.values()]
        )
        weighted_extents["sum"] = category_sum

        # Create a map to hold fractions of each weighted extent to the sum
        # See also:
        # https://grasswiki.osgeo.org/wiki/LANDSAT#Hint:_Minimal_disk_space_copies
        r.reclass(
            input=base,
            output=fractions,
            rules="-",
            stdin="*=*",
            verbose=False,
            quiet=True,
        )

        # Compute weighted fractions of land types
        fraction_category_label = {
            key: float(value) / weighted_extents["sum"]
            for (key, value) in weighted_extents.items()
            if key != "sum"
        }

        # Build fraction category and label rules for `r.category`
        fraction_rules = "\n".join(
            ["{0}:{1}".format(key, value)
             for key, value in fraction_category_label.items()]
        )

        # Set rules
        r.category(map=fractions, rules="-", stdin=fraction_rules,
                   separator=":")

        # Assert that sum of fractions is ~1
        fraction_categories = grass.parse_command("r.category", map=fractions,
                                                  delimiter="\t")
        fractions_sum = sum(
            [float(x) if not math.isnan(float(x)) else 0
             for x in fraction_categories.values()]
        )
        msg = "Fractions: {f}".format(f=fraction_categories)
        grass.debug(_(msg))

        # g.message(_("Sum: {:.17g}".format(fractions_sum)))
        assert abs(fractions_sum - 1) < 1.0e-6, "Sum of fractions is != 1"

        # Compute flow
        flow_expression = "@{fractions} * @{flow}"
        flow_expression = flow_expression.format(fractions=fractions,
                                                 flow=flow_in_base)
        flow_equation = EQUATION.format(result=flow,
                                        expression=flow_expression)
        r.mapcalc(flow_equation, overwrite=True)

        # Write flow figures as raster category labels
        r.stats_zonal(
            base=reclassified_base,
            flags="r",
            cover=flow,
            method="sum",
            output=flow_in_category,
            overwrite=True,
            verbose=False,
            quiet=True,
        )

        # Parse flow categories and labels
        flow_categories = grass.parse_command("r.category",
                                              map=flow_in_category,
                                              delimiter="\t")
        grass.debug(_("Flow: {c}".format(c=flow_categories)))

        # Build flow category and label rules for `r.category`
        flow_rules = "\n".join(
            ["{0}:{1}".format(key, value)
             for key, value in flow_categories.items()]
        )

        # Discard areas out of MASK
        # Check here again!
        # Output patch of all flow maps?
        copy_equation = EQUATION.format(result=flow_in_category,
                                        expression=flow_in_category)
        r.mapcalc(copy_equation, overwrite=True)

        # Reassign cell category labels
        r.category(map=flow_in_category, rules="-", stdin=flow_rules,
                   separator=":")

        # Update title
        reclassified_base_title += " " + category
        r.support(flow_in_category, title=reclassified_base_title)

        # debugging
        # r.report(
        #     flags='hn',
        #     map=(flow_in_category),
        #     units=('k','c','p'),
        # )

        if print_only:
            r.stats(
                input=(flow_in_category),
                output="-",
                flags="nacpl",
                separator=COMMA,
                quiet=True,
            )

        if not print_only:

            if flow_column_name:
                flow_column_prefix = flow_column_name + category
            else:
                flow_column_name = "flow"
                flow_column_prefix = flow_column_name + category

            # Produce vector map(s)
            if vector:
                # The following is wrong
                # update_vector(vector=vector,
                #               raster=flow_in_category,
                #               methods=METHODS,
                #               column_prefix=flow_column_prefix)

                # What can be done?
                # Maybe update columns of an existing map from the columns of
                # the following vectorised raster map(s)
                # ?
                raster_to_vector(raster=flow_in_category,
                                 vector=flow_in_category, type="area")

            # get statistics
            dictionary = get_raster_statistics(
                map_one=aggregation,  # reclassified_base
                map_two=flow_in_category,
                separator="|",
                flags="nlcap",
            )

            # merge 'dictionary' with global 'statistics_dictionary'
            statistics_dictionary = merge_two_dictionaries(
                statistics_dictionary, dictionary)

        # It is important to remove the MASK!
        r.mask(flags="r", quiet=True)

    # FIXME
    # Add "reclassified_base" map to "remove_at_exit" here, so as to be after
    # all reclassified maps that derive from it

    # remove the map 'reclassified_base'
    # g.remove(flags='f', type='raster', name=reclassified_base, quiet=True)
    # remove_map_at_exit(reclassified_base)

    if not print_only:
        r.patch(flags="", input=flows, output=flow_in_reclassified_base,
                quiet=True)

        if vector:
            # Patch all flow vector maps in one
            v.patch(
                flags="e",
                input=flows,
                output=flow_in_reclassified_base,
                overwrite=True,
                quiet=True,
            )

        # export to csv
        if supply_filename:
            supply_filename += CSV_EXTENSION
            nested_dictionary_to_csv(supply_filename, statistics_dictionary)

        if use_filename:
            use_filename += CSV_EXTENSION
            uses = compile_use_table(statistics_dictionary)
            dictionary_to_csv(use_filename, uses)

    # Maybe return list of flow maps?  Requires unique flow map names
    return flows
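# --- Illustrative sketch (not part of the module above) ---------------------
# The docstring's pseudocode (weighted extents -> fractions -> flow) reduced
# to plain Python, to make the arithmetic behind the r.mapcalc/r.stats_zonal
# calls explicit.  The land types, extents, weights and the zone's total flow
# below are made-up numbers, not values used by the module.

def distribute_flow(extents, weights, zone_flow):
    """Split zone_flow over land types proportionally to extent * weight."""
    weighted = {t: extents[t] * weights[t] for t in extents}    # WEt = Et * Wt
    total = sum(weighted.values())                              # S = sum(WEt)
    fractions = {t: we / total for t, we in weighted.items()}   # Rt = WEt / S
    return {t: r * zone_flow for t, r in fractions.items()}     # flow per type

extents = {"forest": 120.0, "grassland": 60.0, "water": 20.0}   # e.g. hectares
weights = {"forest": 3.0, "grassland": 2.0, "water": 1.0}       # suitability scores
print(distribute_flow(extents, weights, zone_flow=1000.0))
# -> {'forest': 720.0, 'grassland': 240.0, 'water': 40.0}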
r.mapcalc(mcstr, overwrite=True)
r.mapcalc("boundaries = boundaries > 0", overwrite=True)  # Logical 0/1
r.null(map='boundaries', setnull=0)

_x = garray.array()
_x.read('x')
_y = garray.array()
_y.read('y')
drainarray = garray.array()

# Much of this will depend on experiment
DEMs = gscript.parse_command('g.list', type='raster',
                             pattern='*__DEM__*').keys()
DEMs = sorted(DEMs)

for DEM in DEMs:
    r.patch(input='boundaries,' + DEM, output='tmp', overwrite=True)
    drainarray.read('tmp')
    scanName = DEM.split('__DEM__')[0]
    mainThalweg = scanName + '__main_thalweg__'
    tribThalweg = scanName + '__trib_thalweg__'

    # Main channel
    #start_x = margin_left/1000.
    #start_y = _y[:,1][drainarray[:,1] == np.min(drainarray[:,1])]
    flowIn = garray.array()
    flowIn[:, 2][drainarray[:, 2] < (np.min(drainarray[:, 2]) + .01)] = 1
    flowIn.write('tmpFlowIn', overwrite=True)
    r.watershed(elevation='tmp', flow='tmpFlowIn', threshold=np.sum(flowIn),
                stream='tmpStream', accumulation='tmpAccum', flags='s',
                overwrite=True)
    r.mapcalc('tmpStreamZ = (tmpStream * 0 + 1) * tmp', overwrite=True)
    r.to_vect(input='tmpStreamZ', output='tmpStreamLine', type='line',
              overwrite=True)
    r.to_vect(input='tmpStreamZ', output='tmpStreamPoints', type='point',
              column='z', overwrite=True)
    v.db_addcolumn(map='tmpStreamPoints',
                   columns='x double precision, y double precision')
from grass.pygrass import utils
from grass import script as gscript

_x = garray.array()
_x.read('x')
_y = garray.array()
_y.read('y')
drainarray = garray.array()

# Get LP with only a main channel (no trib channel)
DEMs = gscript.parse_command('g.list', type='raster',
                             pattern='*DEM_0*').keys()
DEMs = sorted(DEMs)

for DEM in DEMs:
    print(DEM)
    r.patch(input='boundaries,' + DEM, output='tmp', overwrite=True)
    drainarray.read('tmp')

    # Main channel
    start_x = margin_left / 1000. + .1
    start_y = _y[:, 1][drainarray[:, 1] == np.min(drainarray[:, 1])]
    flowIn = garray.array()
    flowIn[:] = ((_x <= (margin_left / 1000. + .01)) * (_x > _x[0, 1])) \
        * (_y >= 1.28) * (_y <= 1.30)
    flowIn.write('tmpFlowIn', overwrite=True)
    # Must fix here: some cells on wall at boundary
    r.watershed(elevation='tmp', flow='tmpFlowIn', threshold=np.sum(flowIn),
                stream='tmpStream', accumulation='tmpAccum', flags='s',
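# --- Illustrative sketch (not part of the script above) ---------------------
# The flowIn expression above is a product of boolean masks evaluated on the
# x/y coordinate rasters.  A small NumPy stand-in; the coordinates, margin
# and y-band are invented here and are not the experiment's actual values.
import numpy as np

x = np.linspace(0.0, 0.2, 5)              # cell-centre x coordinates
y = np.linspace(1.25, 1.33, 5)            # cell-centre y coordinates
_x, _y = np.meshgrid(x, y)

margin_left = 100.0                        # placeholder; divided by 1000 as in the script
flow_in = ((_x <= (margin_left / 1000. + .01)) & (_x > _x[0, 1])
           & (_y >= 1.28) & (_y <= 1.30)).astype(float)
print(flow_in)                             # 1 only in the inflow strip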
def main():
    soillossin = options['soillossin']
    soillossout = options['soillossout']
    factorold = options['factorold']
    factornew = options['factornew']
    map = options['map']
    factorcol = options['factorcol']

    flag_p = flags['p']  # patch factornew with factorold
    flag_k = flags['k']  # calculate k-factor components from
                         # % clay p_T, silt p_U, stones p_st, humus p_H

    if not factornew:
        factors = {}
        if flag_k:
            gscript.message('Using factor derived from soil components.')
            parcelmap = Vect(map)
            parcelmap.open(mode='rw', layer=1)
            parcelmap.table.filters.select()
            cur = parcelmap.table.execute()
            col_names = [cn[0] for cn in cur.description]
            rows = cur.fetchall()

            for col in (u'Kb', u'Ks', u'Kh', u'K'):
                if col not in parcelmap.table.columns:
                    parcelmap.table.columns.add(col, u'DOUBLE')

            for row in rows:
                rowid = row[1]
                p_T = row[7]
                p_U = row[8]
                p_st = row[9]
                p_H = row[10]

                print("Parzelle mit id %d :" % rowid)  # parcel with this id

                # Soil type class (Bodenart) from clay (p_T) and silt (p_U)
                for sublist in bodenarten:
                    if p_T in range(sublist[2], sublist[3]) \
                            and p_U in range(sublist[4], sublist[5]):
                        print('Bodenart "' + sublist[1] + '", Kb = '
                              + str(sublist[6]))
                        Kb = sublist[6]
                        break

                # Stone content class (Skelettgehalt)
                for sublist in skelettgehalte:
                    if p_st < sublist[0]:
                        print('Skelettgehaltsklasse bis ' + str(sublist[0])
                              + ' , Ks = ' + str(sublist[1]))
                        Ks = sublist[1]
                        break

                # Humus content class (Humusgehalt)
                for sublist in humusgehalte:
                    if p_H < sublist[0]:
                        print('Humusgehaltsklasse bis ' + str(sublist[0])
                              + ' , Kh = ' + str(sublist[1]))
                        Kh = sublist[1]
                        break

                K = Kb * Ks * Kh
                print('K = ' + str(K))

                if K > 0:
                    parcelmap.table.execute("UPDATE " + parcelmap.name
                                            + " SET"
                                            + " Kb=" + str(Kb)
                                            + ", Ks=" + str(Ks)
                                            + ", Kh=" + str(Kh)
                                            + ", K=" + str(K)
                                            + " WHERE id=" + str(rowid))
                    parcelmap.table.conn.commit()

            parcelmap.close()
            factorcol2 = 'K'

            factors['k'] = map.split('@')[0] + '.tmp.' + factorcol2
            v.to_rast(input=map, use='attr', attrcolumn=factorcol2,
                      output=factors['k'])
            r.null(map=factors['k'], setnull='0')

        if factorcol:
            gscript.message('Using factor from column %s of vector map <%s>.'
                            % (factorcol, map))

            factors['factorcol'] = map.split('@')[0] + '.tmp.' + factorcol
            v.to_rast(input=map, use='attr', attrcolumn=factorcol,
                      output=factors['factorcol'])
            r.null(map=factors['factorcol'], setnull='0')

        print(factors.keys())
        if 'k' not in factors and 'factorcol' not in factors:
            gscript.fatal('Please provide either factor raster map or valid '
                          'vector map with factor column (kfactor) or factor '
                          'components columns (Kb, Ks, Kh)')

        #if 'k' in factors and 'factorcol' in factors:

        factornew = map.split('@')[0] + '.kfactor'
        if 'k' in factors and 'factorcol' in factors:
            factornew = map.split('@')[0] + '.kfactor'
            r.patch(input=(factors['factorcol'], factors['k']),
                    output=factornew)
        elif 'k' in factors:
            g.copy(rast=(factors['k'], factornew))
        elif 'factorcol' in factors:
            g.copy(rast=(factors['factorcol'], factornew))

    if flag_p:
        #factorcorr = factorold + '.update'
        r.patch(input=(factornew, factorold), output=factornew)

    formula = soillossout + '=' + soillossin \
        + '/' + factorold \
        + '*' + factornew
    r.mapcalc(formula)

    r.colors(map=soillossout, raster=soillossin)
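# --- Illustrative sketch (not part of the module above) ---------------------
# The per-parcel K-factor computation above is three table lookups and a
# product, K = Kb * Ks * Kh.  The lookup tables below are placeholders with
# invented class limits and values; they are not the real `bodenarten`,
# `skelettgehalte` or `humusgehalte` tables, and the soil-type test is
# written as inequalities rather than the integer range() membership above.

def k_factor(p_T, p_U, p_st, p_H, soil_types, stone_classes, humus_classes):
    """Return (Kb, Ks, Kh, K) for clay %, silt %, stone % and humus %."""
    Kb = next(row[6] for row in soil_types
              if row[2] <= p_T < row[3] and row[4] <= p_U < row[5])
    Ks = next(ks for limit, ks in stone_classes if p_st < limit)
    Kh = next(kh for limit, kh in humus_classes if p_H < limit)
    return Kb, Ks, Kh, Kb * Ks * Kh

# (id, name, clay_min, clay_max, silt_min, silt_max, Kb) -- invented rows
soil_types = [(1, "loam", 10, 30, 30, 60, 0.40),
              (2, "sand",  0, 10,  0, 30, 0.15)]
stone_classes = [(10, 0.9), (100, 0.7)]    # (% stones upper limit, Ks)
humus_classes = [(2, 1.0), (100, 0.85)]    # (% humus upper limit, Kh)

print(k_factor(p_T=20, p_U=40, p_st=5, p_H=1.5,
               soil_types=soil_types, stone_classes=stone_classes,
               humus_classes=humus_classes))   # Kb=0.4, Ks=0.9, Kh=1.0, K~0.36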
def smeasure():
    gscript.message('Import <%s>' % measuremap.name)
    measuremap.autoimport('measures', overwrite=True, quiet=quiet,
                          where="betrieb_id = %s" % betriebid)

    soillossbaremap = maps['soillossbare']
    kfactormap = maps['kfactor']

    if soillossbarecorrmap.exist():
        gscript.message('Using updated soillossbare map.')
        soillossbaremap = soillossbarecorrmap
        kfactormap = Rast(parcelmap.name + '.kfactor')

    if flag_b:
        measurebarriermap = Vect(measuremap.name + '_barrier')
        v.extract(input=measuremap.name, where="barrier = 1",
                  output=measurebarriermap.name)

        measurefieldblockmap = Vect(measuremap.name + '_fieldblocks')
        v.overlay(ainput=maps['fieldblocks'].name,
                  binput=measurebarriermap.name,
                  operator='not', output=measurefieldblockmap.name)

        rsoillossbare.inputs.elevation = maps['elevation'].name
        rsoillossbare.inputs.rfactor = maps['rfactor'].name
        rsoillossbare.inputs.kfactor = kfactormap.name
        rsoillossbare.inputs.map = measurefieldblockmap.name
        rsoillossbare.inputs.constant_m = '0.6'
        rsoillossbare.inputs.constant_n = '1.4'

        rsoillossbare.flags.r = True
        rsoillossbare(soillossbare=soillossbarebarriermap.name)
        soillossbaremap = soillossbarebarriermap

    parcelpfactor = parcelmap.name + '.pfactor'
    parcelcfactor = parcelmap.name + '.cfactor'
    v.to_rast(input=parcelmap.name, use='attr', attrcolumn='pfactor',
              output=parcelpfactor)
    v.to_rast(input=parcelmap.name, use='attr', attrcolumn='cfactor',
              output=parcelcfactor)

    measurepfactor = measuremap.name + '.pfactor'
    measurecfactor = measuremap.name + '.cfactor'
    v.to_rast(input=measuremap.name, use='attr', attrcolumn='pfactor',
              output=measurepfactor)
    v.to_rast(input=measuremap.name, use='attr', attrcolumn='cfactor',
              output=measurecfactor)

    # Measure factors take precedence over parcel factors where they overlap
    pfactor = parcelmap.name + '.pfactor.measure'
    cfactor = parcelmap.name + '.cfactor.measure'
    r.patch(input=(measurepfactor, parcelpfactor), output=pfactor)
    r.patch(input=(measurecfactor, parcelcfactor), output=cfactor)

    rsoillossgrow.inputs.soillossbare = soillossbaremap.name
    # Assign the patched C- and P-factor maps to their matching inputs
    rsoillossgrow.inputs.cfactor = cfactor
    rsoillossgrow.inputs.pfactor = pfactor
    rsoillossgrow(soillossgrow=soillossmeasuremap.name)

    rsoillossreclass(soillossmeasuremap.name, 'soillossgrow', flags='')
    gscript.message('Reclassified and colored maps found in <%s.3> and '
                    '<%s.9>.' % (soillossmeasuremap.name,
                                 soillossmeasuremap.name))

    if flag_s:
        gscript.message('\n \n Statistics for soilloss on grown soil '
                        '<%s> : ' % soillossgrowmap)
        rsoillossstats(soilloss=soillossmeasuremap.name,
                       map=parcelmap.name, parcelnumcol='id')

    if not flag_c:
        g.copy(rast=(soillossmeasuremap.name, output))
        gscript.message('Copy made to <%s> for automatic output' % output)
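# --- Illustrative sketch (not part of the module above) ---------------------
# r.patch keeps the first non-null value across its inputs, which is why the
# measure-level factor maps are listed before the parcel-level ones above:
# wherever a measure polygon is mapped, its P/C factor overrides the parcel
# default.  A NumPy stand-in with invented values:
import numpy as np

parcel_pfactor = np.full((3, 3), 1.0)        # parcel-wide default P factor
measure_pfactor = np.full((3, 3), np.nan)    # null outside the measure
measure_pfactor[1, :] = 0.5                  # e.g. a grassed buffer strip

patched = np.where(np.isnan(measure_pfactor), parcel_pfactor, measure_pfactor)
print(patched)   # row 1 is 0.5, everything else keeps the parcel value 1.0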