def output_hillslope(method_id):
    """Output hillslope according to different stream cell value methods."""
    for (tmp_row, tmp_col) in stream_coors:
        tmp_hillslp_ids = DelineateHillslope.cal_hs_codes(max_id,
                                                          stream_data[tmp_row][tmp_col])
        if 0 < method_id < 3:
            hillslope_mtx[tmp_row][tmp_col] = tmp_hillslp_ids[method_id]
            # is head stream cell?
            if (tmp_row, tmp_col) in headstream_coors:
                hillslope_mtx[tmp_row][tmp_col] = tmp_hillslp_ids[0]
        elif method_id == 3:
            hillslope_mtx[tmp_row][tmp_col] = DEFAULT_NODATA
        elif method_id == 4:
            hillslope_mtx[tmp_row][tmp_col] = 0
    # Output to raster file
    hillslope_out_new = hillslope_out
    dirpath = os.path.dirname(hillslope_out_new) + os.path.sep
    corename = FileClass.get_core_name_without_suffix(hillslope_out_new)
    if method_id == 1:
        hillslope_out_new = dirpath + corename + '_right.tif'
    elif method_id == 2:
        hillslope_out_new = dirpath + corename + '_left.tif'
    elif method_id == 3:
        hillslope_out_new = dirpath + corename + '_nodata.tif'
    elif method_id == 4:
        hillslope_out_new = dirpath + corename + '_zero.tif'
    RasterUtilClass.write_gtiff_file(hillslope_out_new, nrows, ncols, hillslope_mtx,
                                     geotrans, srs, DEFAULT_NODATA, datatype)
def convert_code(in_file, out_file, in_alg='taudem', out_alg='arcgis', datatype=None):
    """Convert D8 flow direction code from one algorithm to another.

    Args:
        in_file: input raster file path
        out_file: output raster file path
        in_alg: available algorithms are in FlowModelConst.d8_dirs. "taudem" is the default
        out_alg: same as in_alg. "arcgis" is the default
        datatype: default is None, i.e., use the datatype of the in_file
    """
    FileClass.check_file_exists(in_file)
    in_alg = in_alg.lower()
    out_alg = out_alg.lower()
    if in_alg not in FlowModelConst.d8_dirs or out_alg not in FlowModelConst.d8_dirs:
        raise RuntimeError('The input algorithm name should be one of %s'
                           % ', '.join(list(FlowModelConst.d8_dirs.keys())))
    convert_dict = dict()
    in_code = FlowModelConst.d8_dirs.get(in_alg)
    out_code = FlowModelConst.d8_dirs.get(out_alg)
    assert len(in_code) == len(out_code)
    for i, tmp_in_code in enumerate(in_code):
        convert_dict[tmp_in_code] = out_code[i]
    if datatype is not None and datatype in GDALDataType:
        RasterUtilClass.raster_reclassify(in_file, convert_dict, out_file, datatype)
    else:
        RasterUtilClass.raster_reclassify(in_file, convert_dict, out_file)
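# Hedged usage sketch for convert_code: the file names below are hypothetical
# placeholders, and D8Util is assumed to be the hosting class, as it is
# referenced elsewhere in this code (see post_process_of_delineated_data).
def convert_code_example():
    """Reclassify a TauDEM D8 raster into ArcGIS D8 codes (hypothetical paths)."""
    D8Util.convert_code('flow_dir_taudem.tif', 'flow_dir_arcgis.tif',
                        in_alg='taudem', out_alg='arcgis')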
def reclassify_landcover_parameters(landuse_file, landcover_file,
                                    landcover_initial_fields_file,
                                    landcover_lookup_file, attr_names, dst_dir):
    """Reclassify landcover_init_param parameters."""
    land_cover_codes = LanduseUtilClass.initialize_landcover_parameters(
        landuse_file, landcover_initial_fields_file, dst_dir)
    attr_map = LanduseUtilClass.read_crop_lookup_table(landcover_lookup_file)
    n = len(attr_names)
    replace_dicts = list()
    dst_crop_tifs = list()
    for i in range(n):
        cur_attr = attr_names[i]
        cur_dict = dict()
        dic = attr_map[cur_attr]
        for code in land_cover_codes:
            if MathClass.floatequal(code, DEFAULT_NODATA):
                continue
            if code not in list(cur_dict.keys()):
                cur_dict[code] = dic.get(code)
        replace_dicts.append(cur_dict)
        dst_crop_tifs.append(dst_dir + os.path.sep + cur_attr + '.tif')
    # Generate GTIFF
    for i, v in enumerate(dst_crop_tifs):
        RasterUtilClass.raster_reclassify(landcover_file, replace_dicts[i], v)
def connectdown(np, p, acc, outlet, wtsd=None, workingdir=None, mpiexedir=None,
                exedir=None, log_file=None, runtime_file=None, hostfile=None):
    """Reads an ad8 contributing area file, identifies the location of the
    largest ad8 value as the outlet of the largest watershed."""
    # If watershed is not specified, use acc to generate a mask layer.
    if wtsd is None or not os.path.isfile(wtsd):
        p, workingdir = TauDEM.check_infile_and_wp(p, workingdir)
        wtsd = workingdir + os.sep + 'wtsd_default.tif'
        RasterUtilClass.get_mask_from_raster(p, wtsd, True)
    fname = TauDEM.func_name('connectdown')
    return TauDEM.run(FileClass.get_executable_fullpath(fname, exedir),
                      {'-p': p, '-ad8': acc, '-w': wtsd},
                      workingdir,
                      None,
                      {'-o': outlet},
                      {'mpipath': mpiexedir, 'hostfile': hostfile, 'n': np},
                      {'logfile': log_file, 'runtimefile': runtime_file})
def hydrological_radius(acc_file, radius_file, storm_probability='T2'):
    """Calculate hydrological radius."""
    acc_r = RasterUtilClass.read_raster(acc_file)
    xsize = acc_r.nCols
    ysize = acc_r.nRows
    nodata_value = acc_r.noDataValue
    cellsize = acc_r.dx
    data = acc_r.data
    coe_table = {"T2": [0.05, 0.48],
                 "T10": [0.12, 0.52],
                 "T100": [0.18, 0.55]}
    ap = coe_table[storm_probability][0]
    bp = coe_table[storm_probability][1]

    def radius_cal(acc):
        """Calculate hydrological radius"""
        if abs(acc - nodata_value) < UTIL_ZERO:
            return DEFAULT_NODATA
        return numpy.power(ap * ((acc + 1) * cellsize * cellsize / 1000000.), bp)

    radius_cal_numpy = numpy.frompyfunc(radius_cal, 1, 1)
    radius = radius_cal_numpy(data)
    RasterUtilClass.write_gtiff_file(radius_file, ysize, xsize, radius,
                                     acc_r.geotrans, acc_r.srs,
                                     DEFAULT_NODATA, GDT_Float32)
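# A worked check of the radius formula above (hedged: synthetic numbers only,
# using the T2 row of coe_table). Note the coefficient sits inside the power:
# R = (0.05 * A) ** 0.48, with A the upstream area in km^2. For a 30 m cell
# with an accumulation of 1000 cells, A is about 0.90 km^2 and R roughly 0.23.
def hydrological_radius_example():
    """Evaluate the T2 radius formula for one hypothetical cell."""
    import numpy
    cellsize = 30.0  # hypothetical cell size in meters
    acc = 1000       # hypothetical flow accumulation (cell count)
    area_km2 = (acc + 1) * cellsize * cellsize / 1000000.
    radius = numpy.power(0.05 * area_km2, 0.48)
    print('Upstream area: %.2f km^2, hydrological radius: %.2f' % (area_km2, radius))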
def initialize_landcover_parameters(landcover_file, landcover_initial_fields_file,
                                    dst_dir):
    """Generate initial landcover_init_param parameters."""
    lc_data_items = read_data_items_from_txt(landcover_initial_fields_file)
    field_names = lc_data_items[0]
    lu_id = -1
    for i, v in enumerate(field_names):
        if StringClass.string_match(v, 'LANDUSE_ID'):
            lu_id = i
            break
    data_items = lc_data_items[1:]
    replace_dicts = dict()
    for item in data_items:
        for i, v in enumerate(item):
            if i != lu_id:
                if field_names[i].upper() not in list(replace_dicts.keys()):
                    replace_dicts[field_names[i].upper()] = {float(item[lu_id]): float(v)}
                else:
                    replace_dicts[field_names[i].upper()][float(item[lu_id])] = float(v)
    # Generate GTIFF
    for item, v in list(replace_dicts.items()):
        filename = dst_dir + os.path.sep + item + '.tif'
        print(filename)
        RasterUtilClass.raster_reclassify(landcover_file, v, filename)
    return list(replace_dicts['LANDCOVER'].values())
def ridge_without_flowin_cell(self):
    """Find the original ridge sources that have no flow-in cells."""
    for row in range(self.nrows):
        for col in range(self.ncols):
            tempdir = self.flowdir_data[row][col]
            if MathClass.floatequal(tempdir, self.nodata_flow):
                self.rdgsrc_data[row][col] = DEFAULT_NODATA
                continue
            if self.flowmodel == 1:  # Dinf flow model
                temp_coor = DinfUtil.downstream_index_dinf(tempdir, row, col)
                for temprow, tempcol in temp_coor:
                    if 0 <= temprow < self.nrows and 0 <= tempcol < self.ncols:
                        self.rdgsrc_data[temprow][tempcol] = DEFAULT_NODATA
                    else:
                        self.rdgsrc_data[row][col] = DEFAULT_NODATA
            else:  # D8 flow model
                temprow, tempcol = D8Util.downstream_index(tempdir, row, col)
                if 0 <= temprow < self.nrows and 0 <= tempcol < self.ncols:
                    self.rdgsrc_data[temprow][tempcol] = DEFAULT_NODATA
                else:
                    self.rdgsrc_data[row][col] = DEFAULT_NODATA
    RasterUtilClass.write_gtiff_file(self.rdgorg, self.nrows, self.ncols,
                                     self.rdgsrc_data, self.geotrans, self.srs,
                                     DEFAULT_NODATA, 6)
def generate_lat_raster(cfg):
    """Generate latitude raster."""
    dem_file = cfg.spatials.filldem
    ds = RasterUtilClass.read_raster(dem_file)
    src_srs = ds.srs
    if not src_srs.ExportToProj4():
        raise ValueError('The source raster %s has no coordinate system, '
                         'which is required!' % dem_file)
    dst_srs = osr_SpatialReference()
    dst_srs.ImportFromEPSG(4326)  # WGS84
    transform = osr_CoordinateTransformation(src_srs, dst_srs)
    point_ll = ogr_CreateGeometryFromWkt('POINT (%f %f)' % (ds.xMin, ds.yMin))
    point_ur = ogr_CreateGeometryFromWkt('POINT (%f %f)' % (ds.xMax, ds.yMax))
    point_ll.Transform(transform)
    point_ur.Transform(transform)
    lower_lat = point_ll.GetY()
    up_lat = point_ur.GetY()
    rows = ds.nRows
    cols = ds.nCols
    delta_lat = (up_lat - lower_lat) / float(rows)

    def cal_cell_lat(row, col):
        """Calculate latitude of a cell by its row number."""
        return up_lat - (row + 0.5) * delta_lat

    data_lat = fromfunction(cal_cell_lat, (rows, cols))
    data_lat = where(ds.validZone, data_lat, ds.data)
    RasterUtilClass.write_gtiff_file(cfg.spatials.cell_lat, rows, cols, data_lat,
                                     ds.geotrans, ds.srs, ds.noDataValue,
                                     GDT_Float32)
def flow_velocity(slope_file, radius_file, manning_file, velocity_file):
    """Calculate flow velocity (Manning's equation)."""
    slp_r = RasterUtilClass.read_raster(slope_file)
    slp_data = slp_r.data
    xsize = slp_r.nCols
    ysize = slp_r.nRows
    nodata_value = slp_r.noDataValue
    rad_data = RasterUtilClass.read_raster(radius_file).data
    man_data = RasterUtilClass.read_raster(manning_file).data
    vel_max = 3.0
    vel_min = 0.0001

    def velocity_cal(rad, man, slp):
        """Calculate velocity, clamped to [vel_min, vel_max]."""
        if abs(slp - nodata_value) < UTIL_ZERO:
            return DEFAULT_NODATA
        tmp = numpy.power(man, -1) * numpy.power(rad, 2. / 3.) * numpy.power(slp, 0.5)
        if tmp < vel_min:
            return vel_min
        if tmp > vel_max:
            return vel_max
        return tmp

    velocity_cal_numpy = numpy.frompyfunc(velocity_cal, 3, 1)
    velocity = velocity_cal_numpy(rad_data, man_data, slp_data)
    RasterUtilClass.write_gtiff_file(velocity_file, ysize, xsize, velocity,
                                     slp_r.geotrans, slp_r.srs,
                                     DEFAULT_NODATA, GDT_Float32)
def reclassify_landcover_parameters(landuse_file, landcover_file,
                                    landcover_initial_fields_file,
                                    landcover_lookup_file, attr_names, dst_dir,
                                    landuse_shp):
    """Reclassify landcover_init_param parameters."""
    land_cover_codes = LanduseUtilClass.initialize_landcover_parameters(
        landuse_file, landcover_initial_fields_file, dst_dir, landuse_shp)
    attr_map = LanduseUtilClass.read_crop_lookup_table(landcover_lookup_file)
    n = len(attr_names)
    replace_dicts = []
    replace_dicts_attrn = dict()
    dst_crop_tifs = []
    for i in range(n):
        cur_attr = attr_names[i]
        cur_dict = dict()
        dic = attr_map[cur_attr]
        for code in land_cover_codes:
            if MathClass.floatequal(code, DEFAULT_NODATA):
                continue
            if code not in list(cur_dict.keys()):
                cur_dict[code] = dic.get(code)
        replace_dicts_attrn[cur_attr] = cur_dict
        replace_dicts.append(cur_dict)
        dst_crop_tifs.append(dst_dir + os.path.sep + cur_attr + '.tif')
    # Generate GTIFF
    landcover_rec_csv = r'D:\SEIMS\data\zts\data_prepare\spatial\test\landcover_rec_csv.csv'
    RasterUtilClass.landuse_cover_reclassify(landcover_file, landuse_shp,
                                             replace_dicts_attrn, landcover_rec_csv)
    print(landcover_rec_csv)
def pitremove_example():
    """Run a TauDEM function, taking pitremove as an example.

    Compare the max, min, and mean of the raw DEM and the filled DEM.
    The result will be::

        RawDEM: Max: 284.07, Min: 139.11, Mean: 203.92
        FilledDEM: Max: 284.07, Min: 139.11, Mean: 203.93
    """
    dem = '../tests/data/Jamaica_dem.tif'
    wp = '../tests/data/tmp_results'
    fel = 'dem_pitremoved.tif'
    taudem_bin = None
    mpi_bin = None
    num_proc = 2
    TauDEM.pitremove(num_proc, dem, fel, wp, mpiexedir=mpi_bin, exedir=taudem_bin)
    rawdem = RasterUtilClass.read_raster(dem)
    feldem = RasterUtilClass.read_raster(wp + os.sep + fel)
    print('RawDEM: Max: %.2f, Min: %.2f, Mean: %.2f' %
          (rawdem.get_max(), rawdem.get_min(), rawdem.get_average()))
    print('FilledDEM: Max: %.2f, Min: %.2f, Mean: %.2f' %
          (feldem.get_max(), feldem.get_min(), feldem.get_average()))
def flow_time_to_stream(streamlink, velocity, flow_dir_file, t0_s_file,
                        flow_dir_code='TauDEM'):
    """Calculate flow time to the stream channel from each grid cell."""
    strlk_data = RasterUtilClass.read_raster(streamlink).data
    vel_r = RasterUtilClass.read_raster(velocity)
    vel_data = vel_r.data
    xsize = vel_r.nCols
    ysize = vel_r.nRows
    weight = numpy.where(strlk_data <= 0, numpy.ones((ysize, xsize)),
                         numpy.zeros((ysize, xsize)))
    traveltime = numpy.where(vel_r.validZone, numpy.zeros((ysize, xsize)), vel_data)
    flowlen = TerrainUtilClass.calculate_flow_length(flow_dir_file, weight,
                                                     flow_dir_code)
    traveltime = numpy.where(vel_r.validZone,
                             flowlen / (vel_data * 5. / 3.) / 3600., traveltime)
    RasterUtilClass.write_gtiff_file(t0_s_file, ysize, xsize, traveltime,
                                     vel_r.geotrans, vel_r.srs,
                                     DEFAULT_NODATA, GDT_Float32)
def calculate_channel_width(acc_file, chwidth_file):
    """Calculate channel width."""
    acc_r = RasterUtilClass.read_raster(acc_file)
    xsize = acc_r.nCols
    ysize = acc_r.nRows
    dx = acc_r.dx
    cell_area = dx * dx
    # Power-law coefficients by storm frequency:
    #   storm frequency    a      b
    #   2                  1      0.56
    #   10                 1.2    0.56
    #   100                1.4    0.56
    a = 1.2
    b = 0.56
    # TODO: Figure out what this means, and move it to text.py or config.py. LJ
    tmp_ones = numpy.ones((ysize, xsize))
    width = tmp_ones * DEFAULT_NODATA
    valid_values = numpy.where(acc_r.validZone, acc_r.data, tmp_ones)
    width = numpy.where(acc_r.validZone,
                        numpy.power((a * (valid_values + 1) * cell_area / 1000000.), b),
                        width)
    RasterUtilClass.write_gtiff_file(chwidth_file, ysize, xsize, width,
                                     acc_r.geotrans, acc_r.srs,
                                     DEFAULT_NODATA, GDT_Float32)
    return width
def mask_origin_delineated_data(cfg):
    """Mask the original delineated data by Subbasin raster."""
    subbasin_tau_file = cfg.taudems.subbsn
    geodata2dbdir = cfg.dirs.geodata2db
    UtilClass.mkdir(geodata2dbdir)
    mask_file = cfg.spatials.mask
    RasterUtilClass.get_mask_from_raster(subbasin_tau_file, mask_file)
    # Total 12 raster files
    original_files = [cfg.taudems.subbsn, cfg.taudems.d8flow, cfg.taudems.stream_raster,
                      cfg.taudems.slp, cfg.taudems.filldem, cfg.taudems.d8acc,
                      cfg.taudems.stream_order, cfg.taudems.dinf, cfg.taudems.dinf_d8dir,
                      cfg.taudems.dinf_slp, cfg.taudems.dinf_weight,
                      cfg.taudems.dist2stream_d8]
    # output masked files
    output_files = [cfg.taudems.subbsn_m, cfg.taudems.d8flow_m, cfg.taudems.stream_m,
                    cfg.spatials.slope, cfg.spatials.filldem, cfg.spatials.d8acc,
                    cfg.spatials.stream_order, cfg.spatials.dinf, cfg.spatials.dinf_d8dir,
                    cfg.spatials.dinf_slp, cfg.spatials.dinf_weight,
                    cfg.spatials.dist2stream_d8]
    default_values = list()
    for i in range(len(original_files)):
        default_values.append(DEFAULT_NODATA)
    # other input rasters need to be masked
    # soil and landuse
    FileClass.check_file_exists(cfg.soil)
    FileClass.check_file_exists(cfg.landuse)
    original_files.append(cfg.soil)
    output_files.append(cfg.spatials.soil_type)
    default_values.append(cfg.default_soil)
    original_files.append(cfg.landuse)
    output_files.append(cfg.spatials.landuse)
    default_values.append(cfg.default_landuse)
    # Additional raster file
    for k, v in cfg.additional_rs.items():
        org_v = v
        if not FileClass.is_file_exists(org_v):
            v = cfg.spatial_dir + os.path.sep + org_v
            if not FileClass.is_file_exists(v):
                print('WARNING: The additional file %s MUST be located in '
                      'SPATIAL_DATA_DIR, or provided as full file path!' % k)
                continue
        original_files.append(v)
        output_files.append(cfg.dirs.geodata2db + os.path.sep + k + '.tif')
        default_values.append(DEFAULT_NODATA)
    config_file = cfg.logs.mask_cfg
    # run mask operation
    print('Mask original delineated data by Subbasin raster...')
    SpatialDelineation.mask_raster_cpp(cfg.seims_bin, mask_file, original_files,
                                       output_files, default_values, config_file)
def main(landusef, fieldf, fieldtxt, jsonout):
    """Construct hydrologically connected fields units data in JSON file format."""
    # Check the file existence
    FileClass.check_file_exists(landusef)
    FileClass.check_file_exists(fieldf)
    FileClass.check_file_exists(fieldtxt)
    # read raster data and check the extent based on landuse.
    landuser = RasterUtilClass.read_raster(landusef)
    data_landuse = landuser.data
    nrows = landuser.nRows
    ncols = landuser.nCols
    dx = landuser.dx
    nodata_landuse = landuser.noDataValue
    fieldr = RasterUtilClass.read_raster(fieldf)
    if fieldr.nRows != nrows or fieldr.nCols != ncols:
        raise ValueError('The connected_fields raster MUST have the same dimensions'
                         ' with landuse!')
    data_fields = fieldr.data
    nodata_fields = fieldr.noDataValue
    # Read the initial relationships between fields
    fields_info = read_field_relationships(fieldtxt)
    # add landuse types and areas
    for m in range(nrows):
        for n in range(ncols):
            cur_lu = int(data_landuse[m][n])
            cur_fld = int(data_fields[m][n])
            if cur_fld == nodata_fields or cur_lu == nodata_landuse or cur_lu <= 0:
                continue
            if cur_fld not in fields_info['units']:
                raise ValueError('%d is not recorded in field relationship text!'
                                 % cur_fld)
            if cur_lu not in fields_info['units'][cur_fld]['landuse']:
                fields_info['units'][cur_fld]['landuse'][cur_lu] = 1
            else:
                fields_info['units'][cur_fld]['landuse'][cur_lu] += 1
    for k, v in viewitems(fields_info['units']):
        area_field = 0.
        area_max = 0.
        area_max_lu = 0
        for luid, luarea in viewitems(v['landuse']):
            v['landuse'][luid] = luarea * dx * dx * 1.e-6
            area_field += v['landuse'][luid]
            if v['landuse'][luid] > area_max:
                area_max = v['landuse'][luid]
                area_max_lu = luid
        v['area'] = area_field
        if v['primarylanduse'] != area_max_lu:
            print(k, v['primarylanduse'], area_max_lu)
            v['primarylanduse'] = area_max_lu
    # save to json
    json_updown_data = json.dumps(fields_info, indent=4)
    with open(jsonout, 'w', encoding='utf-8') as f:
        f.write('%s' % json_updown_data)
def subbasin_boundary_cells(self, subbsn_perc):
    """Subbasin boundary cells that are potential ridge sources."""
    dir_deltas = FlowModelConst.d8delta_ag.values()
    subbsn_elevs = dict()

    def add_elev_to_subbsn_elevs(sid, elev):
        if sid not in subbsn_elevs:
            subbsn_elevs[sid] = [elev]
        else:
            subbsn_elevs[sid].append(elev)

    for row in range(self.nrows):
        for col in range(self.ncols):
            if MathClass.floatequal(self.subbsn_data[row][col], self.nodata_subbsn):
                continue
            for r, c in dir_deltas:
                new_row = row + r
                new_col = col + c
                if 0 <= new_row < self.nrows and 0 <= new_col < self.ncols:
                    if MathClass.floatequal(self.subbsn_data[new_row][new_col],
                                            self.nodata_subbsn):
                        subbsnid = self.subbsn_data[row][col]
                        self.rdgpot[row][col] = subbsnid
                        add_elev_to_subbsn_elevs(subbsnid, self.elev_data[row][col])
                    elif not MathClass.floatequal(self.subbsn_data[row][col],
                                                  self.subbsn_data[new_row][new_col]):
                        subbsnid = self.subbsn_data[row][col]
                        subbsnid2 = self.subbsn_data[new_row][new_col]
                        self.rdgpot[row][col] = subbsnid
                        self.rdgpot[new_row][new_col] = subbsnid2
                        add_elev_to_subbsn_elevs(subbsnid, self.elev_data[row][col])
                        add_elev_to_subbsn_elevs(subbsnid2,
                                                 self.elev_data[new_row][new_col])
    RasterUtilClass.write_gtiff_file(self.boundsrc, self.nrows, self.ncols,
                                     self.rdgpot, self.geotrans, self.srs,
                                     DEFAULT_NODATA, 6)
    subbsn_elevs_thresh = dict()
    for sid, elevs in list(subbsn_elevs.items()):
        tmpelev = numpy.array(elevs)
        tmpelev.sort()
        subbsn_elevs_thresh[sid] = tmpelev[int(len(tmpelev) * subbsn_perc)]
    for row in range(self.nrows):
        for col in range(self.ncols):
            if MathClass.floatequal(self.rdgpot[row][col], DEFAULT_NODATA):
                continue
            if self.elev_data[row][col] < subbsn_elevs_thresh[self.subbsn_data[row][col]]:
                self.rdgpot[row][col] = DEFAULT_NODATA
    RasterUtilClass.write_gtiff_file(self.boundsrcfilter, self.nrows, self.ncols,
                                     self.rdgpot, self.geotrans, self.srs,
                                     DEFAULT_NODATA, 6)
def slope_rad_to_deg(tanslp, slp):
    """Convert slope from tangent values (rise/run) to degrees."""
    origin = RasterUtilClass.read_raster(tanslp)
    temp = origin.data == origin.noDataValue
    slpdata = numpy.where(temp, origin.noDataValue,
                          numpy.arctan(origin.data) * 180. / numpy.pi)
    RasterUtilClass.write_gtiff_file(slp, origin.nRows, origin.nCols, slpdata,
                                     origin.geotrans, origin.srs,
                                     origin.noDataValue, origin.dataType)
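# A quick check of the conversion above (hedged: a scalar example rather than a
# raster). A tangent slope of 0.5 (50% rise over run) corresponds to
# arctan(0.5) * 180 / pi, i.e., about 26.57 degrees.
def slope_deg_example():
    """Convert a single tangent-slope value to degrees."""
    import numpy
    print(numpy.arctan(0.5) * 180. / numpy.pi)  # ~26.57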
def generate_runoff_coefficent(maindb, landuse_file, slope_file, soil_texture_file,
                               runoff_coeff_file, imper_perc=0.3):
    """Generate potential runoff coefficient."""
    # read landuse lookup table from MongoDB
    prc_fields = ['PRC_ST%d' % (i,) for i in range(1, 13)]
    sc_fields = ['SC_ST%d' % (i,) for i in range(1, 13)]
    query_result = maindb['LANDUSELOOKUP'].find()
    if query_result is None:
        raise RuntimeError('LanduseLookup collection does not exist or is empty!')
    runoff_c0 = dict()
    runoff_s0 = dict()
    for row in query_result:
        tmpid = row.get('LANDUSE_ID')
        runoff_c0[tmpid] = [float(row.get(item)) for item in prc_fields]
        runoff_s0[tmpid] = [float(row.get(item)) for item in sc_fields]
    landu_raster = RasterUtilClass.read_raster(landuse_file)
    landu_data = landu_raster.data
    nodata_value1 = landu_raster.noDataValue
    xsize = landu_raster.nCols
    ysize = landu_raster.nRows
    nodata_value2 = landu_raster.noDataValue
    slo_data = RasterUtilClass.read_raster(slope_file).data
    soil_texture_array = RasterUtilClass.read_raster(soil_texture_file).data
    id_omited = list()

    def coef_cal(lu_id, soil_texture, slope):
        """Calculate runoff coefficient by landuse, soil texture, and slope."""
        if abs(lu_id - nodata_value1) < UTIL_ZERO or int(lu_id) < 0:
            return nodata_value2
        if int(lu_id) not in list(runoff_c0.keys()):
            if int(lu_id) not in id_omited:
                print('The landuse ID: %d does not exist.' % int(lu_id))
                id_omited.append(int(lu_id))
            return DEFAULT_NODATA
        stid = int(soil_texture) - 1
        c0 = runoff_c0[int(lu_id)][stid]
        s0 = runoff_s0[int(lu_id)][stid] / 100.
        slp = slope
        if slp + s0 < 0.0001:
            return c0
        coef1 = (1 - c0) * slp / (slp + s0)
        coef2 = c0 + coef1
        # TODO: Check if it should be (lu_id >= 98). By lj
        if int(lu_id) == 106 or int(lu_id) == 107 or int(lu_id) == 105:
            return coef2 * (1 - imper_perc) + imper_perc
        else:
            return coef2

    coef_cal_numpy = np_frompyfunc(coef_cal, 3, 1)
    coef = coef_cal_numpy(landu_data, soil_texture_array, slo_data)
    RasterUtilClass.write_gtiff_file(runoff_coeff_file, ysize, xsize, coef,
                                     landu_raster.geotrans, landu_raster.srs,
                                     nodata_value2, GDT_Float32)
def main(): """TEST CODE""" inf = r'C:\z_data_m\SEIMS2017\fuzslppos_ywz10m\slope_position_units\SLOPPOSITION.tif' # inr = RasterUtilClass.read_raster(inf) # inr.data[inr.data > 0] = 1. # RasterUtilClass.write_gtiff_file(inf, inr.nRows, inr.nCols, inr.data, # inr.geotrans, inr.srs, inr.noDataValue, # inr.dataType) RasterUtilClass.raster_to_gtiff(inf, inf, True, True)
def calculate_latitude_dependent_parameters(lat_file, min_dayl_file, dormhr_file,
                                            dorm_hr):
    """Calculate latitude-dependent parameters, including:

    1. minimum daylength (daylmn)
    2. day length threshold for dormancy (dormhr)
    """
    # Calculate minimum daylength, from readwgn.f of SWAT:
    #   daylength = 2 * acos(-tan(sd) * tan(lat)) / omega
    # where solar declination, sd, = -23.5 degrees for minimum daylength in the
    # northern hemisphere and -tan(sd) = .4348.
    # The absolute value of tan(lat) is taken to convert southern hemisphere
    # values to the northern hemisphere.
    # The angular velocity of the earth's rotation, omega, = 15 deg/hr or
    # 0.2618 rad/hr, and 2 / 0.2618 = 7.6394.
    cell_lat_r = RasterUtilClass.read_raster(lat_file)
    lat_data = cell_lat_r.data
    zero = numpy.zeros((cell_lat_r.nRows, cell_lat_r.nCols))
    # convert degrees to radians (2 * pi / 360 = 1 / 57.296)
    daylmn_data = 0.4348 * numpy.abs(numpy.tan(lat_data / 57.296))
    condition = daylmn_data < 1.
    daylmn_data = numpy.where(condition, numpy.arccos(daylmn_data), zero)
    daylmn_data *= 7.6394
    daylmn_data = numpy.where(cell_lat_r.validZone, daylmn_data, lat_data)
    RasterUtilClass.write_gtiff_file(min_dayl_file, cell_lat_r.nRows, cell_lat_r.nCols,
                                     daylmn_data, cell_lat_r.geotrans, cell_lat_r.srs,
                                     cell_lat_r.noDataValue, GDT_Float32)

    def cal_dorm_hr(lat):
        """Calculate day length threshold for dormancy."""
        if lat == cell_lat_r.noDataValue:
            return cell_lat_r.noDataValue
        else:
            if 20. <= lat <= 40.:
                return (numpy.abs(lat - 20.)) / 20.
            elif lat > 40.:
                return 1.
            elif lat < 20.:
                return -1.

    cal_dorm_hr_numpy = numpy.frompyfunc(cal_dorm_hr, 1, 1)
    if dorm_hr < -UTIL_ZERO:
        dormhr_data = cal_dorm_hr_numpy(lat_data)
    else:
        dormhr_data = numpy.where(cell_lat_r.validZone,
                                  numpy.ones((cell_lat_r.nRows,
                                              cell_lat_r.nCols)) * dorm_hr,
                                  lat_data)
    RasterUtilClass.write_gtiff_file(dormhr_file, cell_lat_r.nRows, cell_lat_r.nCols,
                                     dormhr_data, cell_lat_r.geotrans, cell_lat_r.srs,
                                     cell_lat_r.noDataValue, GDT_Float32)
def output(self, jfile, unitraster, unitshp):
    """Output json file and slope position units raster file."""
    json_updown_data = json.dumps(self.units_updwon, indent=4)
    with open(jfile, 'w', encoding='utf-8') as f:
        f.write('%s' % json_updown_data)
    RasterUtilClass.write_gtiff_file(unitraster, self.nrows, self.ncols,
                                     self.slppos_ids, self.geotrans, self.srs,
                                     DEFAULT_NODATA, self.datatype)
    VectorUtilClass.raster2shp(unitraster, unitshp)
    print("Original unique spatial units ID raster saved as '%s'" % unitraster)
def rpi_calculation(distdown, distup, rpi_outfile):
    """Calculate Relative Position Index (RPI)."""
    down = RasterUtilClass.read_raster(distdown)
    up = RasterUtilClass.read_raster(distup)
    temp = down.data < 0
    rpi_data = numpy.where(temp, down.noDataValue,
                           down.data / (down.data + up.data))
    RasterUtilClass.write_gtiff_file(rpi_outfile, down.nRows, down.nCols, rpi_data,
                                     down.geotrans, down.srs, down.noDataValue,
                                     down.dataType)
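# A sanity check of the RPI definition above (hedged: synthetic distances, not
# real rasters). RPI = dist_down / (dist_down + dist_up), so cells near the
# stream approach 0 and cells near the ridge approach 1.
def rpi_example():
    """Evaluate RPI for three synthetic cells."""
    import numpy
    dist_down = numpy.array([0., 50., 100.])
    dist_up = numpy.array([100., 50., 0.])
    print(dist_down / (dist_down + dist_up))  # [0.  0.5 1. ]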
def filter_ridge_by_subbasin_boundary(self):
    """Filter ridge sources by potential ridges on subbasin boundaries."""
    for row in range(self.nrows):
        for col in range(self.ncols):
            if MathClass.floatequal(self.rdgsrc_data[row][col], DEFAULT_NODATA):
                continue
            if MathClass.floatequal(self.rdgpot[row][col], DEFAULT_NODATA):
                self.rdgsrc_data[row][col] = DEFAULT_NODATA
    RasterUtilClass.write_gtiff_file(self.rdgsrc, self.nrows, self.ncols,
                                     self.rdgsrc_data, self.geotrans, self.srs,
                                     DEFAULT_NODATA, 6)
def main(): """Read GeoTiff raster data and perform log transformation. """ input_tif = "../tests/data/Jamaica_dem.tif" output_tif = "../tests/data/tmp_results/log_dem.tif" rst = RasterUtilClass.read_raster(input_tif) # raster data (with noDataValue as numpy.nan) as numpy array rst_valid = rst.validValues output_data = np.log(rst_valid) # write output raster RasterUtilClass.write_gtiff_file(output_tif, rst.nRows, rst.nCols, output_data, rst.geotrans, rst.srs, rst.noDataValue, rst.dataType)
def match_subbasin(subbsn_file, site_dict, maindb):
    """Match the ID of the subbasin.

    1. Read the coordinates of each subbasin's outlet, and the outlet ID
       of the whole basin (not finished yet).
    2. Depending on the isOutlet field:
       2.1 - 0: return the subbasin_id of the site's location
       2.2 - 1: return the outlet ID of the whole basin
       2.3 - 2: return the outlet ID of the nearest subbasin
       2.4 - 3: return the outlet IDs of the conjunct subbasins
    """
    subbasin_raster = RasterUtilClass.read_raster(subbsn_file)
    localx = site_dict.get(StationFields.x)
    localy = site_dict.get(StationFields.y)
    site_type = site_dict.get(StationFields.outlet)
    subbasin_id = subbasin_raster.get_value_by_xy(localx, localy)
    if subbasin_id is None and site_type != 1:
        # the site is not inside the basin and not the outlet either.
        return False, None
    if site_type == 0:
        return True, [subbasin_id]
    elif site_type == 1:
        outid = int(MongoQuery.get_init_parameter_value(maindb,
                                                        SubbsnStatsName.outlet))
        return True, [outid]
    elif site_type == 2:
        return True, [subbasin_id]
    # TODO: handle site_type == 3 (conjunct subbasins).
def calculate_environment(self):
    """Calculate the environmental effectiveness of the current scenario."""
    if not self.modelrun:  # no evaluation done
        self.economy = self.worst_econ
        self.environment = self.worst_env
        return
    rfile = self.modelout_dir + os.path.sep + self.bmps_info['ENVEVAL']
    if not FileClass.is_file_exists(rfile):
        time.sleep(5)  # sleep 5 seconds to wait for the output
        if not FileClass.is_file_exists(rfile):
            print('WARNING: Although the SEIMS model ran successfully, the desired '
                  'output: %s cannot be found!' % rfile)
            self.economy = self.worst_econ
            self.environment = self.worst_env
            return
    base_amount = self.bmps_info['BASE_ENV']
    if StringClass.string_match(rfile.split('.')[-1], 'tif'):  # Raster data
        rr = RasterUtilClass.read_raster(rfile)
        soil_erosion_amount = rr.get_sum() / self.timerange  # unit: year
        # reduction rate of soil erosion
        self.environment = (base_amount - soil_erosion_amount) / base_amount
    elif StringClass.string_match(rfile.split('.')[-1], 'txt'):  # Time series data
        sed_sum = read_simulation_from_txt(self.modelout_dir)  # TODO, fix it later, lj
        self.environment = (base_amount - sed_sum) / base_amount
    else:
        self.economy = self.worst_econ
        self.environment = self.worst_env
        return
def post_process_of_delineated_data(cfg):
    """Do some necessary transfer for subbasin, stream, and flow direction raster."""
    # inputs
    stream_net_file = cfg.taudems.streamnet_shp
    subbasin_file = cfg.taudems.subbsn_m
    flow_dir_file_tau = cfg.taudems.d8flow_m
    stream_raster_file = cfg.taudems.stream_m
    # outputs
    # -- shapefile
    shp_dir = cfg.dirs.geoshp
    UtilClass.mkdir(shp_dir)
    # ---- outlet, copy from DirNameUtils.TauDEM
    FileClass.copy_files(cfg.taudems.outlet_m, cfg.vecs.outlet)
    # ---- reaches
    output_reach_file = cfg.vecs.reach
    # ---- subbasins
    subbasin_vector_file = cfg.vecs.subbsn
    # -- raster file
    output_subbasin_file = cfg.spatials.subbsn
    output_flow_dir_file = cfg.spatials.d8flow
    output_stream_link_file = cfg.spatials.stream_link
    output_hillslope_file = cfg.spatials.hillslope

    id_map = StreamnetUtil.serialize_streamnet(stream_net_file, output_reach_file)
    RasterUtilClass.raster_reclassify(subbasin_file, id_map, output_subbasin_file,
                                      GDT_Int32)
    StreamnetUtil.assign_stream_id_raster(stream_raster_file, output_subbasin_file,
                                          output_stream_link_file)
    # Convert D8 encoding rule to ArcGIS
    D8Util.convert_code(flow_dir_file_tau, output_flow_dir_file)
    # convert raster to shapefile (for subbasin and basin)
    print('Generating subbasin vector...')
    VectorUtilClass.raster2shp(output_subbasin_file, subbasin_vector_file,
                               'subbasin', FieldNames.subbasin_id)
    mask_file = cfg.spatials.mask
    basin_vector = cfg.vecs.bsn
    print('Generating basin vector...')
    VectorUtilClass.raster2shp(mask_file, basin_vector, 'basin', FieldNames.basin)
    # delineate hillslope
    DelineateHillslope.downstream_method_whitebox(output_stream_link_file,
                                                  flow_dir_file_tau,
                                                  output_hillslope_file)
def __init__(self, flowdirf, subbsnf, elevf, rdgsrc, flow_model=1, prop=0., ws=None):
    """Initialize file names."""
    FileClass.check_file_exists(flowdirf)
    FileClass.check_file_exists(subbsnf)
    FileClass.check_file_exists(elevf)
    if ws is None:
        ws = os.path.dirname(flowdirf)
    self.ws = ws
    if flow_model == 1:
        suffix = '_dinf.tif'
    else:
        suffix = '_d8.tif'
    self.rdgorg = self.ws + os.sep + 'RdgOrgSrc' + suffix
    self.boundsrc = self.ws + os.sep + 'RdgPotSrc' + suffix
    self.boundsrcfilter = self.ws + os.sep + 'RdgPotSrcFilter' + suffix
    if rdgsrc is None:
        rdgsrc = self.ws + os.sep + 'rdgsrc' + suffix
    self.rdgsrc = rdgsrc
    self.flowmodel = flow_model
    self.prop = prop
    # read raster data
    flowdir_r = RasterUtilClass.read_raster(flowdirf)
    self.flowdir_data = flowdir_r.data
    self.nrows = flowdir_r.nRows
    self.ncols = flowdir_r.nCols
    self.nodata_flow = flowdir_r.noDataValue
    self.geotrans = flowdir_r.geotrans
    self.srs = flowdir_r.srs
    subbsn_r = RasterUtilClass.read_raster(subbsnf)
    self.subbsn_data = subbsn_r.data
    self.nodata_subbsn = subbsn_r.noDataValue
    elev_r = RasterUtilClass.read_raster(elevf)
    self.elev_data = elev_r.data
    self.nodata_elev = elev_r.noDataValue
    # initialize output arrays
    self.rdgsrc_data = numpy.ones((self.nrows, self.ncols))
    self.rdgpot = numpy.ones((self.nrows, self.ncols)) * DEFAULT_NODATA
def output_compressed_dinf(dinfflowang, compdinffile, weightfile):
    """Output compressed Dinf flow direction and weight to raster files.

    Args:
        dinfflowang: Dinf flow direction raster file
        compdinffile: compressed D8 flow code
        weightfile: the corresponding weight
    """
    dinf_r = RasterUtilClass.read_raster(dinfflowang)
    data = dinf_r.data
    xsize = dinf_r.nCols
    ysize = dinf_r.nRows
    nodata_value = dinf_r.noDataValue
    cal_dir_code = frompyfunc(DinfUtil.compress_dinf, 2, 3)
    updated_angle, dir_code, weight = cal_dir_code(data, nodata_value)
    RasterUtilClass.write_gtiff_file(dinfflowang, ysize, xsize, updated_angle,
                                     dinf_r.geotrans, dinf_r.srs,
                                     DEFAULT_NODATA, GDT_Float32)
    RasterUtilClass.write_gtiff_file(compdinffile, ysize, xsize, dir_code,
                                     dinf_r.geotrans, dinf_r.srs,
                                     DEFAULT_NODATA, GDT_Int16)
    RasterUtilClass.write_gtiff_file(weightfile, ysize, xsize, weight,
                                     dinf_r.geotrans, dinf_r.srs,
                                     DEFAULT_NODATA, GDT_Float32)
def generate_cn2(maindb, landuse_file, hydrogroup_file, cn2_filename):
    """Generate CN2 raster."""
    query_result = maindb['LANDUSELOOKUP'].find()
    if query_result is None:
        raise RuntimeError('LanduseLookup collection does not exist or is empty!')
    # CN2 list for each landuse type and hydrological soil group
    cn2_map = dict()
    for row in query_result:
        lu_id = row.get('LANDUSE_ID')
        cn2_list = [row.get('CN2A'), row.get('CN2B'), row.get('CN2C'),
                    row.get('CN2D')]
        cn2_map[lu_id] = cn2_list
    lu_r = RasterUtilClass.read_raster(landuse_file)
    data_landuse = lu_r.data
    xsize = lu_r.nCols
    ysize = lu_r.nRows
    nodata_value = lu_r.noDataValue
    hg_r = RasterUtilClass.read_raster(hydrogroup_file)
    data_hg = hg_r.data

    def cal_cn2(lucc_id, hg):
        """Calculate CN2 value from landuse ID and Hydro Group number."""
        lucc_id = int(lucc_id)
        if lucc_id < 0 or MathClass.floatequal(lucc_id, nodata_value):
            return DEFAULT_NODATA
        else:
            hg = int(hg) - 1
            if lucc_id not in cn2_map:
                print('lucc %d not found in the CN2 lookup table!' % lucc_id)
                return DEFAULT_NODATA
            return cn2_map[lucc_id][hg]

    cal_cn2_numpy = np_frompyfunc(cal_cn2, 2, 1)
    data_prop = cal_cn2_numpy(data_landuse, data_hg)
    RasterUtilClass.write_gtiff_file(cn2_filename, ysize, xsize, data_prop,
                                     lu_r.geotrans, lu_r.srs, nodata_value,
                                     GDT_Float32)
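# Hedged usage sketch for generate_cn2: the MongoDB connection, database name,
# and file paths below are hypothetical placeholders. 'maindb' only needs a
# 'LANDUSELOOKUP' collection whose documents carry LANDUSE_ID and CN2A-CN2D.
def generate_cn2_example():
    """Generate a CN2 raster from hypothetical inputs."""
    from pymongo import MongoClient
    maindb = MongoClient('localhost', 27017)['model_main_db']
    generate_cn2(maindb, 'landuse.tif', 'hydro_group.tif', 'cn2.tif')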
def assign_stream_id_raster(stream_file, subbasin_file, out_stream_file):
    """Assign stream link ID according to subbasin ID.

    Args:
        stream_file: input stream raster file
        subbasin_file: subbasin raster file
        out_stream_file: output stream raster file
    """
    stream_raster = RasterUtilClass.read_raster(stream_file)
    stream_data = stream_raster.data
    nrows = stream_raster.nRows
    ncols = stream_raster.nCols
    nodata = stream_raster.noDataValue
    subbasin_data = RasterUtilClass.read_raster(subbasin_file).data
    nodata_array = ones((nrows, ncols)) * DEFAULT_NODATA
    newstream_data = where((stream_data > 0) & (stream_data != nodata),
                           subbasin_data, nodata_array)
    RasterUtilClass.write_gtiff_file(out_stream_file, nrows, ncols, newstream_data,
                                     stream_raster.geotrans, stream_raster.srs,
                                     DEFAULT_NODATA, GDT_Int16)
def add_channel_width_to_shp(reach_shp_file, stream_link_file, width_data,
                             default_depth=1.5):
    """Add channel/reach width and default depth to ESRI shapefile."""
    stream_link = RasterUtilClass.read_raster(stream_link_file)
    n_rows = stream_link.nRows
    n_cols = stream_link.nCols
    nodata_value = stream_link.noDataValue
    data_stream = stream_link.data
    ch_width_dic = dict()
    ch_num_dic = dict()
    for i in range(n_rows):
        for j in range(n_cols):
            if abs(data_stream[i][j] - nodata_value) > UTIL_ZERO:
                tmpid = int(data_stream[i][j])
                ch_num_dic.setdefault(tmpid, 0)
                ch_width_dic.setdefault(tmpid, 0)
                ch_num_dic[tmpid] += 1
                ch_width_dic[tmpid] += width_data[i][j]
    for k in ch_num_dic:
        ch_width_dic[k] /= ch_num_dic[k]
    # add channel width_data field to reach shp file
    ds_reach = ogr_Open(reach_shp_file, update=True)
    layer_reach = ds_reach.GetLayer(0)
    layer_def = layer_reach.GetLayerDefn()
    i_link = layer_def.GetFieldIndex(ImportReaches2Mongo._LINKNO)
    i_width = layer_def.GetFieldIndex(ImportReaches2Mongo._WIDTH)
    i_depth = layer_def.GetFieldIndex(ImportReaches2Mongo._DEPTH)
    if i_width < 0:
        new_field = ogr_FieldDefn(ImportReaches2Mongo._WIDTH, OFTReal)
        layer_reach.CreateField(new_field)
    if i_depth < 0:
        new_field = ogr_FieldDefn(ImportReaches2Mongo._DEPTH, OFTReal)
        layer_reach.CreateField(new_field)
    layer_reach.ResetReading()
    ft = layer_reach.GetNextFeature()
    while ft is not None:
        tmpid = ft.GetFieldAsInteger(i_link)
        w = 1
        if tmpid in list(ch_width_dic.keys()):
            w = ch_width_dic[tmpid]
        ft.SetField(ImportReaches2Mongo._WIDTH, w)
        ft.SetField(ImportReaches2Mongo._DEPTH, default_depth)
        layer_reach.SetFeature(ft)
        ft = layer_reach.GetNextFeature()
    layer_reach.SyncToDisk()
    ds_reach.Destroy()
    del ds_reach
def calculate_channel_width_depth(acc_file, chwidth_file, chdepth_file):
    """Calculate channel width and depth according to drainage area (km^2).

    The equations used in the BASINS software to estimate channel width and
    depth are adopted:

        W = 1.29 * A ^ 0.6
        D = 0.13 * A ^ 0.4

    where W is bankfull channel width (m), D is bankfull channel depth (m),
    and A is drainage area (km^2).

    References:
        Ames, D.P., Rafn, E.B., Kirk, R.V., Crosby, B., 2009. Estimation of
        stream channel geometry in Idaho using GIS-derived watershed
        characteristics. Environ. Model. Softw. 24, 444-448.
        https://doi.org/10.1016/j.envsoft.2008.08.008
    """
    acc_r = RasterUtilClass.read_raster(acc_file)
    xsize = acc_r.nCols
    ysize = acc_r.nRows
    dx = acc_r.dx
    cell_area = dx * dx * 0.000001  # m^2 ==> km^2
    tmp_ones = numpy.ones((ysize, xsize))
    width = tmp_ones * DEFAULT_NODATA
    depth = tmp_ones * DEFAULT_NODATA
    valid_values = numpy.where(acc_r.validZone, acc_r.data, tmp_ones)
    # Apply the coefficients outside the power terms, consistent with the
    # equations cited above.
    width = numpy.where(acc_r.validZone,
                        1.29 * numpy.power((valid_values + 1) * cell_area, 0.6),
                        width)
    depth = numpy.where(acc_r.validZone,
                        0.13 * numpy.power((valid_values + 1) * cell_area, 0.4),
                        depth)
    RasterUtilClass.write_gtiff_file(chwidth_file, ysize, xsize, width,
                                     acc_r.geotrans, acc_r.srs,
                                     DEFAULT_NODATA, GDT_Float32)
    RasterUtilClass.write_gtiff_file(chdepth_file, ysize, xsize, depth,
                                     acc_r.geotrans, acc_r.srs,
                                     DEFAULT_NODATA, GDT_Float32)
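# A worked check of the BASINS width-depth equations above (hedged: a scalar
# example with a hypothetical drainage area). For A = 10 km^2:
#   W = 1.29 * 10 ** 0.6 ~ 5.14 m, D = 0.13 * 10 ** 0.4 ~ 0.33 m.
def channel_width_depth_example():
    """Evaluate bankfull width and depth for a hypothetical drainage area."""
    area_km2 = 10.
    width = 1.29 * area_km2 ** 0.6
    depth = 0.13 * area_km2 ** 0.4
    print('W = %.2f m, D = %.2f m' % (width, depth))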
def output_wgs84_geojson(cfg):
    """Convert ESRI shapefile to GeoJson based on WGS84 coordinate."""
    src_srs = RasterUtilClass.read_raster(cfg.dem).srs
    proj_srs = src_srs.ExportToProj4()
    if not proj_srs:
        raise ValueError('The source raster %s has no coordinate system, '
                         'which is required!' % cfg.dem)
    wgs84_srs = 'EPSG:4326'
    geo_json_dict = {'reach': [cfg.vecs.reach, cfg.vecs.json_reach],
                     'subbasin': [cfg.vecs.subbsn, cfg.vecs.json_subbsn],
                     'basin': [cfg.vecs.bsn, cfg.vecs.json_bsn],
                     'outlet': [cfg.vecs.outlet, cfg.vecs.json_outlet]}
    for jsonName, shp_json_list in list(geo_json_dict.items()):
        # delete the GeoJson file if it already exists
        if FileClass.is_file_exists(shp_json_list[1]):
            os.remove(shp_json_list[1])
        VectorUtilClass.convert2geojson(shp_json_list[1], proj_srs, wgs84_srs,
                                        shp_json_list[0])
def calculate_flow_length(flow_dir_file, weight, flow_dir_code='TauDEM'):
    """Generate flow length with weight."""
    flow_dir_raster = RasterUtilClass.read_raster(flow_dir_file)
    fdir_data = flow_dir_raster.data
    xsize = flow_dir_raster.nCols
    ysize = flow_dir_raster.nRows
    nodata_value = flow_dir_raster.noDataValue
    cellsize = flow_dir_raster.dx
    length = numpy.zeros((ysize, xsize))
    for i in range(0, ysize):
        for j in range(0, xsize):
            if abs(fdir_data[i][j] - nodata_value) < UTIL_ZERO:
                length[i][j] = nodata_value
                continue
            TerrainUtilClass.flow_length_cell(i, j, ysize, xsize, fdir_data,
                                              cellsize, weight, length,
                                              flow_dir_code)
    return length
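# Hedged usage sketch: with a weight grid of all ones, calculate_flow_length
# reduces to the plain (unweighted) downstream flow length of each cell. The
# flow direction file name is a hypothetical placeholder.
def flow_length_example():
    """Compute an unweighted flow-length grid from a hypothetical D8 raster."""
    fdir = RasterUtilClass.read_raster('flow_dir_taudem.tif')
    weight = numpy.ones((fdir.nRows, fdir.nCols))
    length = TerrainUtilClass.calculate_flow_length('flow_dir_taudem.tif', weight,
                                                    flow_dir_code='TauDEM')
    print(length.max())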
def std_of_flow_time_to_stream(streamlink, flow_dir_file, slope, radius, velocity,
                               delta_s_file, flow_dir_code='TauDEM'):
    """Generate the standard deviation of t0_s (flow time to the stream channel
    from each cell)."""
    strlk_r = RasterUtilClass.read_raster(streamlink)
    strlk_data = strlk_r.data
    rad_data = RasterUtilClass.read_raster(radius).data
    slo_data = RasterUtilClass.read_raster(slope).data
    vel_r = RasterUtilClass.read_raster(velocity)
    vel_data = vel_r.data
    xsize = vel_r.nCols
    ysize = vel_r.nRows
    nodata_value = vel_r.noDataValue

    def initial_variables(vel, strlk, slp, rad):
        """Initialize the weight used by the flow-length calculation."""
        if abs(vel - nodata_value) < UTIL_ZERO:
            return DEFAULT_NODATA
        if strlk <= 0:
            tmp_weight = 1
        else:
            tmp_weight = 0  # 0 is river
        if slp < 0.0005:
            slp = 0.0005
        # dampGrid = vel * rad / (slp / 100. * 2.)  # No need to divide by 100
        # in my view. By LJ
        damp_grid = vel * rad / (slp * 2.)
        celerity = vel * 5. / 3.
        tmp_weight *= damp_grid * 2. / numpy.power(celerity, 3.)
        return tmp_weight

    initial_variables_numpy = numpy.frompyfunc(initial_variables, 4, 1)
    weight = initial_variables_numpy(vel_data, strlk_data, slo_data, rad_data)
    delta_s_sqr = TerrainUtilClass.calculate_flow_length(flow_dir_file, weight,
                                                         flow_dir_code)

    def cal_delta_s(vel, sqr):
        """Calculate delta s."""
        if abs(vel - nodata_value) < UTIL_ZERO:
            return nodata_value
        else:
            return sqrt(sqr) / 3600.

    cal_delta_s_numpy = numpy.frompyfunc(cal_delta_s, 2, 1)
    delta_s = cal_delta_s_numpy(vel_data, delta_s_sqr)
    RasterUtilClass.write_gtiff_file(delta_s_file, ysize, xsize, delta_s,
                                     strlk_r.geotrans, strlk_r.srs,
                                     DEFAULT_NODATA, GDT_Float32)
def get_subbasin_cell_count(subbsn_file, subdict=None):
    """Get the cell count and area of each subbasin.

    Args:
        subbsn_file: subbasin raster file.
        subdict: default is None

    Returns:
        dict of cell count and area for each subbasin
    """
    wtsd_raster = RasterUtilClass.read_raster(subbsn_file)
    values, counts = numpy.unique(wtsd_raster.data, return_counts=True)
    if not subdict:
        subdict = dict()
    for v, c in zip(values, counts):
        if abs(v - wtsd_raster.noDataValue) < UTIL_ZERO:
            continue
        if int(v) not in subdict:
            subdict[int(v)] = dict()
        subdict[int(v)][ImportReaches2Mongo._NUMCELLS] = int(c)
        subdict[int(v)][ImportReaches2Mongo._AREA] = int(c) * wtsd_raster.dx ** 2
    return subdict
def main(): """Read GeoTiff raster data and print statistics. The output will be:: rows: 130, cols: 100 LLCornerX: 755145.28, LLCornerY: 654294.06 cell size: 10.0 mean: 203.92, max: 284.07, min: 139.11 std: 32.32, sum: 2650967.00 """ input_tif = "../tests/data/Jamaica_dem.tif" rst = RasterUtilClass.read_raster(input_tif) # metadata information print("rows: %d, cols: %d" % (rst.nRows, rst.nCols)) print("LLCornerX: %.2f, LLCornerY: %.2f" % (rst.xMin, rst.yMin)) print("cell size: %.1f" % rst.dx) # basic statistics, nodata is excluded print("mean: %.2f, max: %.2f, min: %.2f" % (rst.get_average(), rst.get_max(), rst.get_min())) print("std: %.2f, sum: %.2f" % (rst.get_std(), rst.get_sum()))
def watershed_delineation(np, dem, outlet_file=None, thresh=0, singlebasin=False,
                          workingdir=None, mpi_bin=None, bin_dir=None,
                          logfile=None, runtime_file=None, hostfile=None):
    """Watershed delineation."""
    # 1. Check directories
    if not os.path.exists(dem):
        TauDEM.error('DEM: %s does not exist!' % dem)
    dem = os.path.abspath(dem)
    if workingdir is None:
        workingdir = os.path.dirname(dem)
    namecfg = TauDEMFilesUtils(workingdir)
    workingdir = namecfg.workspace
    UtilClass.mkdir(workingdir)
    # 2. Check the log file
    if logfile is not None and FileClass.is_file_exists(logfile):
        os.remove(logfile)
    # 3. Get predefined intermediate file names
    filled_dem = namecfg.filldem
    flow_dir = namecfg.d8flow
    slope = namecfg.slp
    flow_dir_dinf = namecfg.dinf
    slope_dinf = namecfg.dinf_slp
    dir_code_dinf = namecfg.dinf_d8dir
    weight_dinf = namecfg.dinf_weight
    acc = namecfg.d8acc
    stream_raster = namecfg.stream_raster
    default_outlet = namecfg.outlet_pre
    modified_outlet = namecfg.outlet_m
    stream_skeleton = namecfg.stream_pd
    acc_with_weight = namecfg.d8acc_weight
    stream_order = namecfg.stream_order
    ch_network = namecfg.channel_net
    ch_coord = namecfg.channel_coord
    stream_net = namecfg.streamnet_shp
    subbasin = namecfg.subbsn
    dist2_stream_d8 = namecfg.dist2stream_d8
    # 4. Perform the calculation
    UtilClass.writelog(logfile, '[Output] %d..., %s' % (10, 'pitremove DEM...'), 'a')
    TauDEM.pitremove(np, dem, filled_dem, workingdir, mpi_bin, bin_dir,
                     log_file=logfile, runtime_file=runtime_file, hostfile=hostfile)
    UtilClass.writelog(logfile, '[Output] %d..., %s' %
                       (20, 'Calculating D8 and Dinf flow direction...'), 'a')
    TauDEM.d8flowdir(np, filled_dem, flow_dir, slope, workingdir, mpi_bin, bin_dir,
                     log_file=logfile, runtime_file=runtime_file, hostfile=hostfile)
    TauDEM.dinfflowdir(np, filled_dem, flow_dir_dinf, slope_dinf, workingdir,
                       mpi_bin, bin_dir, log_file=logfile,
                       runtime_file=runtime_file, hostfile=hostfile)
    DinfUtil.output_compressed_dinf(flow_dir_dinf, dir_code_dinf, weight_dinf)
    UtilClass.writelog(logfile, '[Output] %d..., %s' %
                       (30, 'D8 flow accumulation...'), 'a')
    TauDEM.aread8(np, flow_dir, acc, None, None, False, workingdir, mpi_bin,
                  bin_dir, log_file=logfile, runtime_file=runtime_file,
                  hostfile=hostfile)
    UtilClass.writelog(logfile, '[Output] %d..., %s' %
                       (40, 'Generating stream raster initially...'), 'a')
    min_accum, max_accum, mean_accum, std_accum = \
        RasterUtilClass.raster_statistics(acc)
    TauDEM.threshold(np, acc, stream_raster, mean_accum, workingdir, mpi_bin,
                     bin_dir, log_file=logfile, runtime_file=runtime_file,
                     hostfile=hostfile)
    UtilClass.writelog(logfile, '[Output] %d..., %s' %
                       (50, 'Moving outlet to stream...'), 'a')
    if outlet_file is None:
        outlet_file = default_outlet
        TauDEM.connectdown(np, flow_dir, acc, outlet_file, wtsd=None,
                           workingdir=workingdir, mpiexedir=mpi_bin,
                           exedir=bin_dir, log_file=logfile,
                           runtime_file=runtime_file, hostfile=hostfile)
    TauDEM.moveoutletstostrm(np, flow_dir, stream_raster, outlet_file,
                             modified_outlet, workingdir, mpi_bin, bin_dir,
                             log_file=logfile, runtime_file=runtime_file,
                             hostfile=hostfile)
    UtilClass.writelog(logfile, '[Output] %d..., %s' %
                       (60, 'Generating stream skeleton...'), 'a')
    TauDEM.peukerdouglas(np, filled_dem, stream_skeleton, workingdir, mpi_bin,
                         bin_dir, log_file=logfile, runtime_file=runtime_file,
                         hostfile=hostfile)
    UtilClass.writelog(logfile, '[Output] %d..., %s' %
                       (70, 'Flow accumulation with outlet...'), 'a')
    tmp_outlet = None
    if singlebasin:
        tmp_outlet = modified_outlet
    TauDEM.aread8(np, flow_dir, acc_with_weight, tmp_outlet, stream_skeleton,
                  False, workingdir, mpi_bin, bin_dir, log_file=logfile,
                  runtime_file=runtime_file, hostfile=hostfile)
    if thresh <= 0:  # find the optimal threshold using the dropanalysis function
        UtilClass.writelog(logfile, '[Output] %d..., %s' %
                           (75, 'Drop analysis to select optimal threshold...'), 'a')
        min_accum, max_accum, mean_accum, std_accum = \
            RasterUtilClass.raster_statistics(acc_with_weight)
        if mean_accum - std_accum < 0:
            minthresh = mean_accum
        else:
            minthresh = mean_accum - std_accum
        maxthresh = mean_accum + std_accum
        numthresh = 20
        logspace = 'true'
        drp_file = namecfg.drptxt
        TauDEM.dropanalysis(np, filled_dem, flow_dir, acc_with_weight,
                            acc_with_weight, modified_outlet, minthresh,
                            maxthresh, numthresh, logspace, drp_file, workingdir,
                            mpi_bin, bin_dir, log_file=logfile,
                            runtime_file=runtime_file, hostfile=hostfile)
        if not FileClass.is_file_exists(drp_file):
            raise RuntimeError('Dropanalysis failed and drp.txt was not created!')
        with open(drp_file, 'r', encoding='utf-8') as drpf:
            temp_contents = drpf.read()
            (beg, thresh) = temp_contents.rsplit(' ', 1)
        print(thresh)
    UtilClass.writelog(logfile, '[Output] %d..., %s' %
                       (80, 'Generating stream raster...'), 'a')
    TauDEM.threshold(np, acc_with_weight, stream_raster, float(thresh),
                     workingdir, mpi_bin, bin_dir, log_file=logfile,
                     runtime_file=runtime_file, hostfile=hostfile)
    UtilClass.writelog(logfile, '[Output] %d..., %s' %
                       (90, 'Generating stream net...'), 'a')
    TauDEM.streamnet(np, filled_dem, flow_dir, acc_with_weight, stream_raster,
                     modified_outlet, stream_order, ch_network, ch_coord,
                     stream_net, subbasin, workingdir, mpi_bin, bin_dir,
                     log_file=logfile, runtime_file=runtime_file,
                     hostfile=hostfile)
    UtilClass.writelog(logfile, '[Output] %d..., %s' %
                       (95, 'Calculating distance to stream (D8)...'), 'a')
    TauDEM.d8hdisttostrm(np, flow_dir, stream_raster, dist2_stream_d8, 1,
                         workingdir, mpi_bin, bin_dir, log_file=logfile,
                         runtime_file=runtime_file, hostfile=hostfile)
    UtilClass.writelog(logfile, '[Output] %d..., %s' %
                       (100, 'Original subbasin delineation is finished!'), 'a')
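# Hedged usage sketch for the delineation workflow above: the DEM path and
# working directory are hypothetical placeholders, and num_proc plus the
# optional TauDEM/MPI binary directories should match the local environment.
def watershed_delineation_example():
    """Delineate a watershed from a hypothetical DEM with default settings."""
    watershed_delineation(4, 'dem.tif', outlet_file=None, thresh=0,
                          singlebasin=False, workingdir='demo_workspace')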