def split_raster(rs, split_shp, field_name, temp_dir):
    """Split raster by given shapefile and field name.

    Args:
        rs: original raster file(s).
        split_shp: boundary (ESRI Shapefile) used to split the raster.
        field_name: field name identifying the split value.
        temp_dir: directory to store the split rasters.
    """
    UtilClass.rmmkdir(temp_dir)
    ds = ogr_Open(split_shp)
    lyr = ds.GetLayer(0)
    lyr.ResetReading()
    ft = lyr.GetNextFeature()
    while ft:
        cur_field_name = ft.GetFieldAsString(field_name)
        for r in rs:
            cur_file_name = r.split(os.sep)[-1]
            outraster = temp_dir + os.sep + \
                cur_file_name.replace('.tif', '_%s.tif' %
                                      cur_field_name.replace(' ', '_'))
            subprocess.call(['gdalwarp', r, outraster, '-cutline', split_shp,
                             '-crop_to_cutline',
                             '-cwhere', "'%s'='%s'" % (field_name, cur_field_name),
                             '-dstnodata', '-9999'])
        ft = lyr.GetNextFeature()
    ds = None
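# A minimal usage sketch (assumed, not from the source): note that `rs` is iterated,
# so it should be a list of GeoTIFF paths even though the docstring reads like a
# single file. The paths and the field name below are hypothetical placeholders.
split_raster(['/data/dem.tif', '/data/landuse.tif'],  # rasters to be clipped
             '/data/subbasins.shp',                   # cutline shapefile
             'SUBBASINID',                            # attribute holding the split value
             '/data/tmp_split')                       # output directory for clipped rasters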
def iuh(cfg, n_subbasins):
    """Invoke IUH program."""
    dt = 24
    str_cmd = '"%s/iuh" %s %d %s %s %s %d' % (
        cfg.seims_bin, cfg.hostname, cfg.port, cfg.spatial_db,
        DBTableNames.gridfs_spatial, dt, n_subbasins)
    UtilClass.run_command(str_cmd)
def __init__(self, cf, wp):
    self.ngens = 1
    self.npop = 4
    self.rsel = 0.8
    self.rcross = 0.75
    self.rmut = 0.1
    if 'NSGA2' not in cf.sections():
        raise ValueError('[NSGA2] section MUST exist in the *.ini file.')
    self.ngens = cf.getint('NSGA2', 'generationsnum')
    self.npop = cf.getint('NSGA2', 'populationsize')
    self.rsel = cf.getfloat('NSGA2', 'selectrate')
    self.rcross = cf.getfloat('NSGA2', 'crossoverrate')
    self.rmut = cf.getfloat('NSGA2', 'mutaterate')
    if self.npop % 4 != 0:
        raise ValueError('PopulationSize must be a multiple of 4.')
    self.dirname = 'Cali_NSGA2_Gen_%d_Pop_%d' % (self.ngens, self.npop)
    self.out_dir = wp + os.path.sep + self.dirname
    UtilClass.rmmkdir(self.out_dir)
    self.hypervlog = self.out_dir + os.path.sep + 'hypervolume.txt'
    self.logfile = self.out_dir + os.path.sep + 'runtime.log'
    self.logbookfile = self.out_dir + os.path.sep + 'logbook.txt'
    self.simdata_dir = self.out_dir + os.path.sep + 'simulated_data'
    UtilClass.rmmkdir(self.simdata_dir)
def write_autofuzslppos_config_file(ini_name, bin, wp, data_path, dem_name):
    UtilClass.mkdir(wp)
    org_ini_file = data_path + os.sep + ini_name
    demf = data_path + os.sep + 'inputs' + os.sep + dem_name
    if not os.path.isfile(org_ini_file):
        print('%s file does not exist!' % org_ini_file)
        exit(-1)
    dst_int_file = wp + os.sep + ini_name
    cfg_items = list()
    with open(org_ini_file, 'r') as f:
        for line in f.readlines():
            cfg_items.append(line.strip())
    # print(cfg_items)
    cfg_items.append('[REQUIRED]')
    cfg_items.append('exeDir = %s' % bin)
    cfg_items.append('rootDir = %s' % wp)
    cfg_items.append('rawdem = %s' % demf)
    with open(dst_int_file, 'w') as f:
        for item in cfg_items:
            f.write(item + '\n')
    cf = ConfigParser()
    cf.read(dst_int_file)
    return AutoFuzSlpPosConfig(cf)
def write_gtiff_file(f_name, n_rows, n_cols, data, geotransform, srs,
                     nodata_value, gdal_type=GDT_Float32):
    """Output Raster to GeoTiff format file.

    Args:
        f_name: output GeoTiff file name.
        n_rows: Row count.
        n_cols: Col count.
        data: 2D array data.
        geotransform: geographic transformation.
        srs: coordinate system.
        nodata_value: nodata value.
        gdal_type (:obj:`pygeoc.raster.GDALDataType`): output raster data type,
            GDT_Float32 as default.
    """
    UtilClass.mkdir(os.path.dirname(FileClass.get_file_fullpath(f_name)))
    driver = gdal_GetDriverByName(str('GTiff'))
    try:
        ds = driver.Create(f_name, n_cols, n_rows, 1, gdal_type)
    except Exception:
        print('Cannot create output file %s' % f_name)
        return
    ds.SetGeoTransform(geotransform)
    try:
        ds.SetProjection(srs.ExportToWkt())
    except AttributeError:  # srs is already a WKT string rather than an osr object
        ds.SetProjection(srs)
    ds.GetRasterBand(1).SetNoDataValue(nodata_value)
    # if data contains numpy.nan, replace it with nodata_value
    if isinstance(data, numpy.ndarray) and data.dtype in [numpy.dtype('int'),
                                                          numpy.dtype('float')]:
        data = numpy.where(numpy.isnan(data), nodata_value, data)
    ds.GetRasterBand(1).WriteArray(data)
    ds = None
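# Hedged usage sketch (assumptions, not from the source): a 3-row by 4-column array
# written as a single-band GeoTIFF with 30 m cells; the EPSG code and the output path
# are hypothetical placeholders.
import numpy
from osgeo.osr import SpatialReference

demo = numpy.full((3, 4), 1.0)
gt = (500000.0, 30.0, 0.0, 4000000.0, 0.0, -30.0)  # (xmin, dx, 0, ymax, 0, -dy)
srs = SpatialReference()
srs.ImportFromEPSG(32650)  # WGS 84 / UTM zone 50N, chosen arbitrarily
write_gtiff_file('/tmp/demo.tif', 3, 4, demo, gt, srs, -9999.)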
def export_landuse_lookup_files_from_mongodb(cfg, maindb):
    """Export landuse lookup tables to txt files from MongoDB."""
    lookup_dir = cfg.dirs.lookup
    property_namelist = ModelParamDataUtils.landuse_fields
    property_map = {}
    property_namelist.append('USLE_P')
    query_result = maindb['LANDUSELOOKUP'].find()
    if query_result is None:
        raise RuntimeError('LANDUSELOOKUP collection does not exist or is empty!')
    count = 0
    for row in query_result:
        # print(row)
        value_map = dict()
        for i, p_name in enumerate(property_namelist):
            if StringClass.string_match(p_name, 'USLE_P'):
                # Currently, USLE_P is set as 1 for all landuse.
                value_map[p_name] = 1
            else:
                # I do not know why manning * 10 here. Kept commented out for now. lj
                # if StringClass.string_match(p_name, "Manning"):
                #     value_map[p_name] = row.get(p_name) * 10
                # else:
                value_map[p_name] = row.get(p_name)
        count += 1
        property_map[count] = value_map
    n = len(property_map)
    UtilClass.rmmkdir(lookup_dir)
    for propertyName in property_namelist:
        with open('%s/%s.txt' % (lookup_dir, propertyName), 'w') as f:
            f.write('%d\n' % n)
            for prop_id in property_map:
                s = '%d %f\n' % (prop_id, property_map[prop_id][propertyName])
                f.write(s)
def write_asc_file(filename, data, xsize, ysize, geotransform, nodata_value):
    """Output Raster to ASCII file.

    Args:
        filename: output ASCII filename.
        data: 2D array data.
        xsize: Col count.
        ysize: Row count.
        geotransform: geographic transformation.
        nodata_value: nodata value.
    """
    UtilClass.mkdir(os.path.dirname(FileClass.get_file_fullpath(filename)))
    header = 'NCOLS %d\n' \
             'NROWS %d\n' \
             'XLLCENTER %f\n' \
             'YLLCENTER %f\n' \
             'CELLSIZE %f\n' \
             'NODATA_VALUE %f\n' % (xsize, ysize,
                                    geotransform[0] + 0.5 * geotransform[1],
                                    geotransform[3] - (ysize - 0.5) * geotransform[1],
                                    geotransform[1], nodata_value)
    with open(filename, 'w', encoding='utf-8') as f:
        f.write(header)
        for i in range(0, ysize):
            for j in range(0, xsize):
                f.write('%s\t' % repr(data[i][j]))
            f.write('\n')
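# Hedged usage sketch (assumptions, not from the source): writes a 2x3 array with
# 30 m cells; the output path is hypothetical. With geotransform
# (500000, 30, 0, 4000000, 0, -30) the header works out to
# XLLCENTER = 500000 + 0.5 * 30 = 500015 and
# YLLCENTER = 4000000 - (2 - 0.5) * 30 = 3999955 (square cells assumed).
import numpy

demo_data = numpy.array([[1., 2., 3.],
                         [4., -9999., 6.]])
write_asc_file('/tmp/demo.asc', demo_data, 3, 2,
               (500000., 30., 0., 4000000., 0., -30.), -9999.)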
def __init__(self, cf, wp, dir_template='NSGA2_Gen_%d_Pop_%d'):
    # type: (ConfigParser, AnyStr, AnyStr) -> None
    """Initialization."""
    self.ngens = cf.getint('NSGA2', 'generationsnum') if \
        cf.has_option('NSGA2', 'generationsnum') else 1
    self.npop = cf.getint('NSGA2', 'populationsize') if \
        cf.has_option('NSGA2', 'populationsize') else 4
    self.rsel = cf.getfloat('NSGA2', 'selectrate') if \
        cf.has_option('NSGA2', 'selectrate') else 1.
    self.rcross = cf.getfloat('NSGA2', 'crossoverrate') if \
        cf.has_option('NSGA2', 'crossoverrate') else 0.8
    self.pmut = cf.getfloat('NSGA2', 'maxmutateperc') if \
        cf.has_option('NSGA2', 'maxmutateperc') else 0.2
    self.rmut = cf.getfloat('NSGA2', 'mutaterate') if \
        cf.has_option('NSGA2', 'mutaterate') else 0.1
    if self.npop % 4 != 0:
        raise ValueError('PopulationSize must be a multiple of 4.')
    if '%d' not in dir_template:
        dir_template += '_Gen_%d_Pop_%d'
    elif dir_template.count('%d') == 1:
        dir_template += '_Pop_%d'
    elif dir_template.count('%d') > 2:
        dir_template = 'NSGA2_Gen_%d_Pop_%d'
    self.dirname = dir_template % (self.ngens, self.npop)
    self.out_dir = wp + os.path.sep + self.dirname
    UtilClass.rmmkdir(self.out_dir)
    self.hypervlog = self.out_dir + os.path.sep + 'hypervolume.txt'
    self.logfile = self.out_dir + os.path.sep + 'runtime.log'
    self.logbookfile = self.out_dir + os.path.sep + 'logbook.txt'
    self.simdata_dir = self.out_dir + os.path.sep + 'simulated_data'
    UtilClass.rmmkdir(self.simdata_dir)
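# Hedged sketch (assumed values) of the [NSGA2] options this constructor reads via
# ConfigParser; every key is optional and falls back to the default coded above, but
# populationsize must end up as a multiple of 4. Only building the ConfigParser is
# shown, since the enclosing class name is not given in this snippet.
from configparser import ConfigParser

nsga2_cf = ConfigParser()
nsga2_cf.read_string('[NSGA2]\n'
                     'GenerationsNum = 50\n'
                     'PopulationSize = 48\n'
                     'SelectRate = 1.0\n'
                     'CrossoverRate = 0.8\n'
                     'MaxMutatePerc = 0.2\n'
                     'MutateRate = 0.1\n')
# cfg = <NSGA2ConfigClass>(nsga2_cf, '/path/to/workspace')  # class name hypothetical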
def mask_origin_delineated_data(cfg):
    """Mask the original delineated data by Subbasin raster."""
    subbasin_tau_file = cfg.taudems.subbsn
    geodata2dbdir = cfg.dirs.geodata2db
    UtilClass.mkdir(geodata2dbdir)
    mask_file = cfg.spatials.mask
    RasterUtilClass.get_mask_from_raster(subbasin_tau_file, mask_file)
    # Total 12 raster files
    original_files = [cfg.taudems.subbsn, cfg.taudems.d8flow, cfg.taudems.stream_raster,
                      cfg.taudems.slp, cfg.taudems.filldem, cfg.taudems.d8acc,
                      cfg.taudems.stream_order, cfg.taudems.dinf, cfg.taudems.dinf_d8dir,
                      cfg.taudems.dinf_slp, cfg.taudems.dinf_weight,
                      cfg.taudems.dist2stream_d8]
    # output masked files
    output_files = [cfg.taudems.subbsn_m, cfg.taudems.d8flow_m, cfg.taudems.stream_m,
                    cfg.spatials.slope, cfg.spatials.filldem, cfg.spatials.d8acc,
                    cfg.spatials.stream_order, cfg.spatials.dinf, cfg.spatials.dinf_d8dir,
                    cfg.spatials.dinf_slp, cfg.spatials.dinf_weight,
                    cfg.spatials.dist2stream_d8]
    default_values = list()
    for i in range(len(original_files)):
        default_values.append(DEFAULT_NODATA)
    # other input rasters need to be masked
    # soil and landuse
    FileClass.check_file_exists(cfg.soil)
    FileClass.check_file_exists(cfg.landuse)
    original_files.append(cfg.soil)
    output_files.append(cfg.spatials.soil_type)
    default_values.append(cfg.default_soil)
    original_files.append(cfg.landuse)
    output_files.append(cfg.spatials.landuse)
    default_values.append(cfg.default_landuse)
    # Additional raster file
    for k, v in cfg.additional_rs.items():
        org_v = v
        if not FileClass.is_file_exists(org_v):
            v = cfg.spatial_dir + os.path.sep + org_v
            if not FileClass.is_file_exists(v):
                print('WARNING: The additional file %s MUST be located in '
                      'SPATIAL_DATA_DIR, or provided as full file path!' % k)
                continue
        original_files.append(v)
        output_files.append(cfg.dirs.geodata2db + os.path.sep + k + '.tif')
        default_values.append(DEFAULT_NODATA)
    config_file = cfg.logs.mask_cfg
    # run mask operation
    print('Mask original delineated data by Subbasin raster...')
    SpatialDelineation.mask_raster_cpp(cfg.seims_bin, mask_file, original_files,
                                       output_files, default_values, config_file)
def status_output(status_msg, percent, file_name):
    """Print status and flush to file.

    Args:
        status_msg: status message
        percent: percentage rate of progress
        file_name: log file name
    """
    UtilClass.writelog(file_name, '[Output] %d..., %s' % (percent, status_msg), 'a')
def grid_layering(cfg, n_subbasins):
    """Invoke grid layering program."""
    layering_dir = cfg.dirs.layerinfo
    UtilClass.rmmkdir(layering_dir)
    str_cmd = '"%s/grid_layering" %s %d %s %s %s %d' % (
        cfg.seims_bin, cfg.hostname, cfg.port, layering_dir,
        cfg.spatial_db, DBTableNames.gridfs_spatial, n_subbasins)
    UtilClass.run_command(str_cmd)
def __init__(self, wp):
    """Initialization."""
    self.param_defs_json = wp + os.path.sep + 'param_defs.json'
    self.param_values_txt = wp + os.path.sep + 'param_values.txt'
    self.output_values_dir = wp + os.path.sep + 'temp_output_values'
    self.output_values_txt = wp + os.path.sep + 'output_values.txt'
    self.psa_si_json = wp + os.path.sep + 'psa_si.json'
    self.psa_si_sort_txt = wp + os.path.sep + 'psa_si_sorted.csv'
    UtilClass.mkdir(self.output_values_dir)
def __init__(self, cf, method='morris'):
    """Initialization."""
    self.method = method
    # 1. SEIMS model related
    self.model = ParseSEIMSConfig(cf)
    # 2. Common settings of parameters sensitivity analysis
    if 'PSA_Settings' not in cf.sections():
        raise ValueError('[PSA_Settings] section MUST exist in the *.ini file.')
    self.evaluate_params = list()
    if cf.has_option('PSA_Settings', 'evaluateparam'):
        eva_str = cf.get('PSA_Settings', 'evaluateparam')
        self.evaluate_params = StringClass.split_string(eva_str, ',')
    else:
        self.evaluate_params = ['Q']  # Default
    self.param_range_def = 'morris_param_rng.def'  # Default
    if cf.has_option('PSA_Settings', 'paramrngdef'):
        self.param_range_def = cf.get('PSA_Settings', 'paramrngdef')
    self.param_range_def = self.model.model_dir + os.path.sep + self.param_range_def
    if not FileClass.is_file_exists(self.param_range_def):
        raise IOError('Ranges of parameters MUST be provided!')
    if not (cf.has_option('PSA_Settings', 'psa_time_start') and
            cf.has_option('PSA_Settings', 'psa_time_end')):
        raise ValueError('Start and end time of PSA MUST be specified in [PSA_Settings].')
    try:
        # UTCTIME
        tstart = cf.get('PSA_Settings', 'psa_time_start')
        tend = cf.get('PSA_Settings', 'psa_time_end')
        self.psa_stime = StringClass.get_datetime(tstart)
        self.psa_etime = StringClass.get_datetime(tend)
    except ValueError:
        raise ValueError('The time format MUST be "YYYY-MM-DD HH:MM:SS".')
    if self.psa_stime >= self.psa_etime:
        raise ValueError('Wrong time settings in [PSA_Settings]!')
    # 3. Parameters settings for specific sensitivity analysis methods
    self.morris = None
    self.fast = None
    if self.method == 'fast':
        self.fast = FASTConfig(cf)
        self.psa_outpath = '%s/PSA_FAST_N%dM%d' % (self.model.model_dir,
                                                   self.fast.N, self.fast.M)
    elif self.method == 'morris':
        self.morris = MorrisConfig(cf)
        self.psa_outpath = '%s/PSA_Morris_N%dL%d' % (self.model.model_dir,
                                                     self.morris.N,
                                                     self.morris.num_levels)
    # 4. (Optional) Plot settings for matplotlib
    self.plot_cfg = PlotConfig(cf)
    # Do not remove psa_outpath if it already exists
    UtilClass.mkdir(self.psa_outpath)
    self.outfiles = PSAOutputs(self.psa_outpath)
def save_png_eps(plot, wp, name):
    """Save figures in png, eps, and pdf formats."""
    eps_dir = wp + os.path.sep + 'eps'
    pdf_dir = wp + os.path.sep + 'pdf'
    UtilClass.mkdir(eps_dir)
    UtilClass.mkdir(pdf_dir)
    for figpath in [wp + os.path.sep + name + '.png',
                    eps_dir + os.path.sep + name + '.eps',
                    pdf_dir + os.path.sep + name + '.pdf']:
        plot.savefig(figpath, dpi=300)
def spatial_rasters(cfg, subbasin_num):
    """Import spatial raster data."""
    if subbasin_num == 0:  # the whole basin!
        subbasin_file = cfg.spatials.mask
    else:
        subbasin_file = cfg.spatials.subbsn
    str_cmd = '"%s/import_raster" %s %s %s %s %s %d' % (
        cfg.seims_bin, subbasin_file, cfg.dirs.geodata2db, cfg.spatial_db,
        DBTableNames.gridfs_spatial, cfg.hostname, cfg.port)
    UtilClass.run_command(str_cmd)
def status_output(status_msg, percent, file_name):
    # type: (AnyStr, Union[int, float], AnyStr) -> None
    """Print status and flush to file.

    Args:
        status_msg: status message
        percent: percentage rate of progress
        file_name: file name
    """
    UtilClass.writelog(file_name, '[Output] %d..., %s' % (percent, status_msg), 'a')
def __init__(self, cf, method='morris'):
    """Initialization."""
    self.method = method
    # 1. SEIMS model related
    self.model = ParseSEIMSConfig(cf)
    # 2. Common settings of parameters sensitivity analysis
    if 'PSA_Settings' not in cf.sections():
        raise ValueError('[PSA_Settings] section MUST exist in the *.ini file.')
    self.evaluate_params = list()
    if cf.has_option('PSA_Settings', 'evaluateparam'):
        eva_str = cf.get('PSA_Settings', 'evaluateparam')
        self.evaluate_params = StringClass.split_string(eva_str, ',')
    else:
        self.evaluate_params = ['Q']  # Default
    self.param_range_def = 'morris_param_rng.def'  # Default
    if cf.has_option('PSA_Settings', 'paramrngdef'):
        self.param_range_def = cf.get('PSA_Settings', 'paramrngdef')
    self.param_range_def = self.model.model_dir + os.path.sep + self.param_range_def
    if not FileClass.is_file_exists(self.param_range_def):
        raise IOError('Ranges of parameters MUST be provided!')
    if not (cf.has_option('PSA_Settings', 'psa_time_start') and
            cf.has_option('PSA_Settings', 'psa_time_end')):
        raise ValueError('Start and end time of PSA MUST be specified in [PSA_Settings].')
    try:
        # UTCTIME
        tstart = cf.get('PSA_Settings', 'psa_time_start')
        tend = cf.get('PSA_Settings', 'psa_time_end')
        self.psa_stime = StringClass.get_datetime(tstart)
        self.psa_etime = StringClass.get_datetime(tend)
    except ValueError:
        raise ValueError('The time format MUST be "YYYY-MM-DD HH:MM:SS".')
    if self.psa_stime >= self.psa_etime:
        raise ValueError('Wrong time settings in [PSA_Settings]!')
    # 3. Parameters settings for specific sensitivity analysis methods
    self.morris = None
    self.fast = None
    if self.method == 'fast':
        self.fast = FASTConfig(cf)
        self.psa_outpath = '%s/PSA-FAST-N%dM%d' % (self.model.model_dir,
                                                   self.fast.N, self.fast.M)
    elif self.method == 'morris':
        self.morris = MorrisConfig(cf)
        self.psa_outpath = '%s/PSA-Morris-N%dL%dJ%d' % (self.model.model_dir,
                                                        self.morris.N,
                                                        self.morris.num_levels,
                                                        self.morris.grid_jump)
    # Do not remove psa_outpath if it already exists
    UtilClass.mkdir(self.psa_outpath)
    self.outfiles = PSAOutputs(self.psa_outpath)
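# Hedged sketch (assumed values) of the [PSA_Settings] keys read above:
# psa_time_start and psa_time_end are mandatory, the others fall back to the defaults
# coded in the constructor, and time strings must match "YYYY-MM-DD HH:MM:SS".
# A complete *.ini would additionally carry the sections required by ParseSEIMSConfig
# and by MorrisConfig/FASTConfig, which are not shown here.
from configparser import ConfigParser

psa_cf = ConfigParser()
psa_cf.read_string('[PSA_Settings]\n'
                   'evaluateParam = Q\n'
                   'paramRngDef = morris_param_rng.def\n'
                   'PSA_Time_start = 2012-01-01 00:00:00\n'
                   'PSA_Time_end = 2012-12-31 23:59:59\n')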
def original_delineation(cfg):
    """Original delineation by calling TauDEM functions."""
    # Check directories
    UtilClass.mkdir(cfg.workspace)
    UtilClass.mkdir(cfg.dirs.log)
    bin_dir = cfg.seims_bin
    mpi_bin = cfg.mpi_bin
    np = cfg.np
    TauDEMWorkflow.watershed_delineation(np, cfg.dem, cfg.outlet_file,
                                         cfg.d8acc_threshold, True,
                                         cfg.dirs.taudem, mpi_bin, bin_dir,
                                         cfg.logs.delineation)
def convert2geojson(jsonfile, src_srs, dst_srs, src_file):
    """Convert shapefile to GeoJSON file."""
    if os.path.exists(jsonfile):
        os.remove(jsonfile)
    if sysstr == 'Windows':
        exepath = '"%s/Lib/site-packages/osgeo/ogr2ogr"' % sys.exec_prefix
    else:
        exepath = FileClass.get_executable_fullpath('ogr2ogr')
    s = '%s -f GeoJSON -s_srs "%s" -t_srs %s %s %s' % (
        exepath, src_srs, dst_srs, jsonfile, src_file)
    # os.system(s)
    UtilClass.run_command(s)
def log(lines, log_file=None):
    """Output log message."""
    err = False
    for line in lines:
        print(line)
        if log_file is not None:
            UtilClass.writelog(log_file, line, 'append')
        if 'BAD TERMINATION' in line.upper():
            err = True
            break
    if err:
        TauDEM.error('Error occurred when calling TauDEM function, please check!',
                     log_file)
def mask_raster_cpp(bin_dir, maskfile, originalfiles, outputfiles, default_values,
                    configfile):
    """Call mask_raster program (cpp version) to mask raster."""
    # write mask configuration file
    n = len(originalfiles)
    with open(configfile, 'w', encoding='utf-8') as f:
        f.write('%s\n' % maskfile)
        f.write('%d\n' % (n,))
        for i in range(n):
            s = '%s\t%d\t%s\n' % (originalfiles[i], default_values[i], outputfiles[i])
            f.write('%s' % s)
    # run command
    UtilClass.run_command('"%s/mask_raster" %s' % (bin_dir, configfile))
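# Hedged sketch (paths assumed) of the configuration file written above and handed to
# the mask_raster executable: the mask raster on the first line, the file count on the
# second, then one tab-separated "original  default  output" triple per raster.
#
#   /path/to/mask.tif
#   2
#   /path/to/dem.tif      -9999   /path/to/dem_masked.tif
#   /path/to/landuse.tif  -9999   /path/to/landuse_masked.tif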
def save_png_eps(plot, wp, name, plot_cfg=None):
    # type: (plt, AnyStr, AnyStr, Optional[PlotConfig]) -> None
    """Save figures in the formats specified by plot_cfg (e.g., png, eps, pdf)."""
    # plot.tight_layout()
    if plot_cfg is None:
        plot_cfg = PlotConfig()
    if plot_cfg.plot_cn:
        wp = wp + os.path.sep + 'cn'
        UtilClass.mkdir(wp)
    for fmt in plot_cfg.fmts:
        fmt_dir = wp + os.path.sep + fmt
        UtilClass.mkdir(fmt_dir)
        figpath = fmt_dir + os.path.sep + name + '.' + fmt
        plot.savefig(figpath, dpi=plot_cfg.dpi)
def run_field_partition(bin_dir, maskf, streamf, flowf, luf, demf, thresh,
                        arcgis_code=True):
    """Run fieldpartition program."""
    cmd_str = '"%s/fieldpartition" -mask %s -stream %s -flow %s' \
              ' -lu %s -dem %s -t %d' % (bin_dir, maskf, streamf, flowf,
                                         luf, demf, thresh)
    if arcgis_code:
        cmd_str += ' -arcgis'
    UtilClass.run_command(cmd_str)
def main():
    """FUNCTION TESTS"""
    cur_path = UtilClass.current_path(lambda: 0)
    SEIMS_path = os.path.abspath(cur_path + '../../..')
    model_paths = ModelPaths(SEIMS_path, 'dianbu2', 'demo_dianbu2_model')
    prep_cfg = write_preprocess_config_file(model_paths, 'preprocess.ini')
    postp_cfg = write_postprocess_config_file(model_paths, 'postprocess.ini')
def __init__(self, cf):
    # type: (ConfigParser) -> None
    """Initialization."""
    SAConfig.__init__(self, cf)  # initialize base class first
    # 1. Check the required keys and values
    requiredkeys = ['COLLECTION', 'DISTRIBUTION', 'SUBSCENARIO',
                    'ENVEVAL', 'BASE_ENV', 'UNITJSON']
    for k in requiredkeys:
        if k not in self.bmps_info:
            raise ValueError('%s: MUST be provided in BMPs_cfg_units or BMPs_info!' % k)
    # 2. Slope position units information
    units_json = self.bmps_info.get('UNITJSON')
    unitsf = self.model.model_dir + os.sep + units_json
    if not FileClass.is_file_exists(unitsf):
        raise Exception('UNITJSON file %s does not exist!' % unitsf)
    with open(unitsf, 'r', encoding='utf-8') as updownfo:
        self.units_infos = json.load(updownfo)
    self.units_infos = UtilClass.decode_strs_in_dict(self.units_infos)
    if 'overview' not in self.units_infos:
        raise ValueError('overview MUST exist in the UNITJSON file.')
    if 'all_units' not in self.units_infos['overview']:
        raise ValueError('all_units MUST exist in the overview dict of UNITJSON.')
    self.units_num = self.units_infos['overview']['all_units']  # type: int
    # 3. Collection name and subscenario IDs
    self.bmps_coll = self.bmps_info.get('COLLECTION')  # type: str
    self.bmps_subids = self.bmps_info.get('SUBSCENARIO')  # type: List[int]
    # 4. Construct the dict of gene index to unit ID, and unit ID to gene index
    self.unit_to_gene = OrderedDict()  # type: OrderedDict[int, int]
    self.gene_to_unit = dict()  # type: Dict[int, int]
    # 5. Construct the upstream-downstream units of each unit if necessary
    self.updown_units = dict()  # type: Dict[int, Dict[AnyStr, List[int]]]
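# Hedged sketch of the minimal UNITJSON structure validated above. Only the keys this
# constructor actually checks are shown; the unit-level entries of a real slope
# position units file are omitted because their layout is not given in this snippet.
#
# {
#     "overview": {"all_units": 5}
#     ... per-unit entries omitted ...
# }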
def run(self):
    """Run SEIMS model."""
    stime = time.time()
    if not os.path.isdir(self.OutputDirectory) or not os.path.exists(self.OutputDirectory):
        os.makedirs(self.OutputDirectory)
    # If the input time period is not consistent with the predefined period in FILE_IN.
    if self.simu_stime and self.simu_etime and self.simu_stime != self.start_time \
            and self.simu_etime != self.end_time:
        self.ResetSimulationPeriod()
    # If the output time period is specified, reset the time period of all output IDs
    if self.out_stime and self.out_etime:
        self.ResetOutputsPeriod(self.OutputIDs, self.out_stime, self.out_etime)
    try:
        self.runlogs = UtilClass.run_command(self.Command)
        with open(self.OutputDirectory + os.sep + 'runlogs.txt', 'w',
                  encoding='utf-8') as f:
            f.write('\n'.join(self.runlogs))
        self.ParseTimespan(self.runlogs)
        self.run_success = True
    except (CalledProcessError, IOError, Exception) as err:
        # 1. The SEIMS-based model failed to run
        # 2. The OUTPUT directory was not created successfully by the SEIMS-based model
        # 3. Other unpredictable errors
        print('Run SEIMS model failed! %s' % str(err))
        self.run_success = False
    self.runtime = time.time() - stime
    return self.run_success
def reclassify_landuse_parameters(bin_dir, config_file, dst_dir, landuse_file,
                                  lookup_dir, landuse_attr_list, default_landuse_id):
    """Reclassify landuse parameters by lookup table.

    TODO(LJ): this function should be replaced by replaceByDict() function!
    """
    # prepare reclassify configuration file
    with open(config_file, 'w', encoding='utf-8') as f_reclass_lu:
        f_reclass_lu.write('%s\t%d\n' % (landuse_file, default_landuse_id))
        f_reclass_lu.write('%s\n' % lookup_dir)
        f_reclass_lu.write(dst_dir + '\n')
        n = len(landuse_attr_list)
        f_reclass_lu.write('%d\n' % n)
        f_reclass_lu.write('\n'.join(landuse_attr_list))
    s = '"%s/reclassify" %s' % (bin_dir, config_file)
    UtilClass.run_command(s)
def post_process_of_delineated_data(cfg):
    """Do some necessary transfer for subbasin, stream, and flow direction raster."""
    # inputs
    stream_net_file = cfg.taudems.streamnet_shp
    subbasin_file = cfg.taudems.subbsn_m
    flow_dir_file_tau = cfg.taudems.d8flow_m
    stream_raster_file = cfg.taudems.stream_m
    # outputs
    # -- shapefile
    shp_dir = cfg.dirs.geoshp
    UtilClass.mkdir(shp_dir)
    # ---- outlet, copy from DirNameUtils.TauDEM
    FileClass.copy_files(cfg.taudems.outlet_m, cfg.vecs.outlet)
    # ---- reaches
    output_reach_file = cfg.vecs.reach
    # ---- subbasins
    subbasin_vector_file = cfg.vecs.subbsn
    # -- raster file
    output_subbasin_file = cfg.spatials.subbsn
    output_flow_dir_file = cfg.spatials.d8flow
    output_stream_link_file = cfg.spatials.stream_link
    output_hillslope_file = cfg.spatials.hillslope
    id_map = StreamnetUtil.serialize_streamnet(stream_net_file, output_reach_file)
    RasterUtilClass.raster_reclassify(subbasin_file, id_map, output_subbasin_file,
                                      GDT_Int32)
    StreamnetUtil.assign_stream_id_raster(stream_raster_file, output_subbasin_file,
                                          output_stream_link_file)
    # Convert D8 encoding rule to ArcGIS
    D8Util.convert_code(flow_dir_file_tau, output_flow_dir_file)
    # convert raster to shapefile (for subbasin and basin)
    print('Generating subbasin vector...')
    VectorUtilClass.raster2shp(output_subbasin_file, subbasin_vector_file,
                               'subbasin', FieldNames.subbasin_id)
    mask_file = cfg.spatials.mask
    basin_vector = cfg.vecs.bsn
    print('Generating basin vector...')
    VectorUtilClass.raster2shp(mask_file, basin_vector, 'basin', FieldNames.basin)
    # delineate hillslope
    DelineateHillslope.downstream_method_whitebox(output_stream_link_file,
                                                  flow_dir_file_tau,
                                                  output_hillslope_file)
def main():
    cur_path = UtilClass.current_path(lambda: 0)
    SEIMS_path = os.path.abspath(cur_path + '../../..')
    model_paths = ModelPaths(SEIMS_path, 'dianbu2', 'demo_dianbu2_model')
    seims_cfg = write_preprocess_config_file(model_paths, 'preprocess.ini')
    SpatialDelineation.workflow(seims_cfg)  # Spatial delineation by TauDEM
    ImportMongodbClass.workflow(seims_cfg)  # Import to MongoDB database
def main():
    cur_path = UtilClass.current_path(lambda: 0)
    SEIMS_path = os.path.abspath(cur_path + '../../..')
    model_paths = ModelPaths(SEIMS_path, 'dianbu2', 'demo_dianbu2_model')
    scenario_id = 0
    seims_obj = MainSEIMS(model_paths.bin_dir, model_paths.model_dir,
                          scenario_id=scenario_id)
    seims_obj.run()
def __init__(self, bpath, data_dir_name, model_dir_name):
    self.mpi_bin = None
    self.bin_dir = bpath + os.path.sep + 'bin'
    self.prescript_dir = bpath + os.path.sep + 'seims' + os.path.sep + 'preprocess'
    self.base_dir = bpath + os.path.sep + 'data' + os.path.sep + data_dir_name
    self.cfg_dir = self.base_dir + os.path.sep + 'model_configs'
    self.model_dir = self.base_dir + os.path.sep + model_dir_name
    self.data_dir = self.base_dir + os.path.sep + 'data_prepare'
    self.clim_dir = self.data_dir + os.path.sep + 'climate'
    self.spatial_dir = self.data_dir + os.path.sep + 'spatial'
    self.observe_dir = self.data_dir + os.path.sep + 'observed'
    self.scenario_dir = self.data_dir + os.path.sep + 'scenario'
    self.lookup_dir = self.data_dir + os.path.sep + 'lookup'
    self.workspace = self.base_dir + os.path.sep + 'workspace'
    UtilClass.mkdir(self.workspace)
    print('SEIMS binary location: %s' % self.bin_dir)
    print('Demo data location: %s' % self.base_dir)
    print('Data preprocessing location: %s' % self.workspace)
def main():
    cur_path = UtilClass.current_path(lambda: 0)
    SEIMS_path = os.path.abspath(cur_path + '../../..')
    model_paths = ModelPaths(SEIMS_path, 'dianbu2', 'demo_dianbu2_model')
    # hydrograph, e.g. discharge
    scenario_id = 0
    post_cfg = write_postprocess_config_file(model_paths, 'postprocess.ini',
                                             scenario_id)
    TimeSeriesPlots(post_cfg).generate_plots()
def prepare_node_with_weight_for_metis(graph, weight, wp):
    # construct the METIS input file
    UtilClass.mkdir(wp)
    metis_input = r'%s/metis.txt' % wp
    ns = graph.nodes()
    with open(metis_input, 'w') as f:
        f.write(str(len(ns)) + '\t' + str(len(graph.edges())) + '\t' + '010\t1\n')
        for node in ns:
            if node <= 0:
                continue
            f.write(str(weight[node][ImportReaches2Mongo._NUMCELLS]) + '\t')
            for e in graph.out_edges(node):
                if e[1] > 0:
                    f.write(str(e[1]) + '\t')
            for e in graph.in_edges(node):
                if e[0] > 0:
                    f.write(str(e[0]) + '\t')
            f.write('\n')
    return metis_input
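# Hedged sketch of the METIS graph file produced above for a hypothetical three-reach
# chain 1 -> 2 -> 3 weighted by cell counts: the header "nodes edges 010 1" declares
# one vertex weight per node, and each node line lists its weight followed by the
# targets of its out-edges and the sources of its in-edges, which makes the adjacency
# effectively undirected as METIS expects.
#
#   3   2   010   1
#   120   2
#   80    3   1
#   200   2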
def spatial_rasters(cfg, subbasin_num):
    """Import spatial raster data."""
    if subbasin_num == 0:  # the whole basin!
        start_id = 0
        subbasin_file = cfg.spatials.mask
    else:
        start_id = 1
        subbasin_file = cfg.spatials.subbsn
    str_cmd = '"%s/import_raster" %s %s %s %s %s %d' % (
        cfg.seims_bin, subbasin_file, cfg.dirs.geodata2db, cfg.spatial_db,
        DBTableNames.gridfs_spatial, cfg.hostname, cfg.port)
    # I recommend not output to directory. lj
    # UtilClass.mkdir(cfg.dirs.import2db)
    # for i in range(start_id, subbasin_num + 1):
    #     subdir = cfg.dirs.import2db + os.path.sep + str(i)
    #     UtilClass.rmmkdir(subdir)
    # str_cmd = '%s %s' % (str_cmd, cfg.dirs.import2db)
    UtilClass.run_command(str_cmd)
def field_partition(cfg):
    """Fields partition incorporating spatial topology.

    Refers to:
        Wu, Hui, A.-Xing Zhu, Jun-Zhi Liu, Yong-Bo Liu, and Jing-Chao Jiang. 2017.
        "Best Management Practices Optimization at Watershed Scale: Incorporating
        Spatial Topology among Fields." Water Resources Management,
        doi: 10.1007/s11269-017-1801-8.
    """
    if not cfg.fields_partition:  # field partition is not required
        return
    maskf = cfg.spatials.mask
    streamf = cfg.spatials.stream_link
    flowf = cfg.spatials.d8flow
    luf = cfg.spatials.landuse
    demf = cfg.spatials.filldem
    threshs = cfg.fields_partition_thresh
    for thresh in threshs:
        # run command
        UtilClass.run_command('"%s/fieldpartition" -mask %s -stream %s '
                              '-flow %s -lu %s -dem %s -t %d' %
                              (cfg.seims_bin, maskf, streamf, flowf, luf, demf, thresh))
def run(self):
    """Run SEIMS model."""
    stime = time.time()
    if not os.path.isdir(self.OutputDirectory) or not os.path.exists(self.OutputDirectory):
        os.makedirs(self.OutputDirectory)
    try:
        run_logs = UtilClass.run_command(self.Command)
        self.ParseTimespan(run_logs)
        self.run_success = True
    except (CalledProcessError, Exception):
        print('Run SEIMS model failed!')
        self.run_success = False
    self.runtime = time.time() - stime
    return self.run_success
def calculate_sensitivity(self):
    """Calculate Morris elementary effects.

    Note that evaluate_models() may return several output variables, so each of them
    is calculated separately.
    """
    if not self.psa_si:
        if FileClass.is_file_exists(self.cfg.outfiles.psa_si_json):
            with open(self.cfg.outfiles.psa_si_json, 'r') as f:
                self.psa_si = UtilClass.decode_strs_in_dict(json.load(f))
            return
    if not self.objnames:
        if FileClass.is_file_exists('%s/objnames.pickle' % self.cfg.psa_outpath):
            with open('%s/objnames.pickle' % self.cfg.psa_outpath, 'r') as f:
                self.objnames = pickle.load(f)
    if self.output_values is None or len(self.output_values) == 0:
        self.evaluate_models()
    if self.param_values is None or len(self.param_values) == 0:
        self.generate_samples()
    if not self.param_defs:
        self.read_param_ranges()
    row, col = self.output_values.shape
    assert (row == self.run_count)
    for i in range(col):
        print(self.objnames[i])
        if self.cfg.method == 'morris':
            tmp_Si = morris_alz(self.param_defs, self.param_values,
                                self.output_values[:, i], conf_level=0.95,
                                print_to_console=True,
                                num_levels=self.cfg.morris.num_levels,
                                grid_jump=self.cfg.morris.grid_jump)
        elif self.cfg.method == 'fast':
            tmp_Si = fast_alz(self.param_defs, self.output_values[:, i],
                              print_to_console=True)
        else:
            raise ValueError('%s method is not supported now!' % self.cfg.method)
        self.psa_si[i] = tmp_Si
    # print(self.psa_si)
    # Save as json, which can be loaded by json.load()
    json_data = json.dumps(self.psa_si, indent=4, cls=SpecialJsonEncoder)
    with open(self.cfg.outfiles.psa_si_json, 'w') as f:
        f.write(json_data)
    self.output_psa_si()
def read_bmp_parameters(self):
    """Read BMP configuration from MongoDB."""
    client = ConnectMongoDB(self.hostname, self.port)
    conn = client.get_conn()
    scenariodb = conn[self.bmp_scenario_db]
    bmpcoll = scenariodb[self.bmps_coll]
    findbmps = bmpcoll.find({}, no_cursor_timeout=True)
    for fb in findbmps:
        fb = UtilClass.decode_strs_in_dict(fb)
        if 'SUBSCENARIO' not in fb:
            continue
        curid = fb['SUBSCENARIO']
        if curid not in self.bmps_subids:
            continue
        if curid not in self.bmps_params:
            self.bmps_params[curid] = dict()
        for k, v in fb.items():
            if k == 'SUBSCENARIO':
                continue
            elif k == 'LANDUSE':
                if isinstance(v, int):
                    v = [v]
                elif v == 'ALL' or v == '':
                    v = None
                else:
                    v = StringClass.extract_numeric_values_from_string(v)
                    v = [int(abs(nv)) for nv in v]
                self.bmps_params[curid][k] = v[:]
            elif k == 'SLPPOS':
                if isinstance(v, int):
                    v = [v]
                elif v == 'ALL' or v == '':
                    v = list(self.slppos_tags.keys())
                else:
                    v = StringClass.extract_numeric_values_from_string(v)
                    v = [int(abs(nv)) for nv in v]
                self.bmps_params[curid][k] = v[:]
            else:
                self.bmps_params[curid][k] = v
    client.close()
def watershed_delineation(np, dem, outlet_file=None, thresh=0, singlebasin=False,
                          workingdir=None, mpi_bin=None, bin_dir=None,
                          logfile=None, runtime_file=None, hostfile=None):
    """Watershed Delineation."""
    # 1. Check directories
    if not os.path.exists(dem):
        TauDEM.error('DEM: %s does not exist!' % dem)
    dem = os.path.abspath(dem)
    if workingdir is None:
        workingdir = os.path.dirname(dem)
    namecfg = TauDEMFilesUtils(workingdir)
    workingdir = namecfg.workspace
    UtilClass.mkdir(workingdir)
    # 2. Check log file
    if logfile is not None and FileClass.is_file_exists(logfile):
        os.remove(logfile)
    # 3. Get predefined intermediate file names
    filled_dem = namecfg.filldem
    flow_dir = namecfg.d8flow
    slope = namecfg.slp
    flow_dir_dinf = namecfg.dinf
    slope_dinf = namecfg.dinf_slp
    dir_code_dinf = namecfg.dinf_d8dir
    weight_dinf = namecfg.dinf_weight
    acc = namecfg.d8acc
    stream_raster = namecfg.stream_raster
    default_outlet = namecfg.outlet_pre
    modified_outlet = namecfg.outlet_m
    stream_skeleton = namecfg.stream_pd
    acc_with_weight = namecfg.d8acc_weight
    stream_order = namecfg.stream_order
    ch_network = namecfg.channel_net
    ch_coord = namecfg.channel_coord
    stream_net = namecfg.streamnet_shp
    subbasin = namecfg.subbsn
    dist2_stream_d8 = namecfg.dist2stream_d8
    # 4. perform calculation
    UtilClass.writelog(logfile, '[Output] %d..., %s' % (10, 'pitremove DEM...'), 'a')
    TauDEM.pitremove(np, dem, filled_dem, workingdir, mpi_bin, bin_dir,
                     log_file=logfile, runtime_file=runtime_file, hostfile=hostfile)
    UtilClass.writelog(logfile, '[Output] %d..., %s' %
                       (20, 'Calculating D8 and Dinf flow direction...'), 'a')
    TauDEM.d8flowdir(np, filled_dem, flow_dir, slope, workingdir, mpi_bin, bin_dir,
                     log_file=logfile, runtime_file=runtime_file, hostfile=hostfile)
    TauDEM.dinfflowdir(np, filled_dem, flow_dir_dinf, slope_dinf, workingdir,
                       mpi_bin, bin_dir,
                       log_file=logfile, runtime_file=runtime_file, hostfile=hostfile)
    DinfUtil.output_compressed_dinf(flow_dir_dinf, dir_code_dinf, weight_dinf)
    UtilClass.writelog(logfile, '[Output] %d..., %s' % (30, 'D8 flow accumulation...'), 'a')
    TauDEM.aread8(np, flow_dir, acc, None, None, False, workingdir, mpi_bin, bin_dir,
                  log_file=logfile, runtime_file=runtime_file, hostfile=hostfile)
    UtilClass.writelog(logfile, '[Output] %d..., %s' %
                       (40, 'Generating stream raster initially...'), 'a')
    min_accum, max_accum, mean_accum, std_accum = RasterUtilClass.raster_statistics(acc)
    TauDEM.threshold(np, acc, stream_raster, mean_accum, workingdir, mpi_bin, bin_dir,
                     log_file=logfile, runtime_file=runtime_file, hostfile=hostfile)
    UtilClass.writelog(logfile, '[Output] %d..., %s' % (50, 'Moving outlet to stream...'), 'a')
    if outlet_file is None:
        outlet_file = default_outlet
        TauDEM.connectdown(np, flow_dir, acc, outlet_file, wtsd=None,
                           workingdir=workingdir, mpiexedir=mpi_bin, exedir=bin_dir,
                           log_file=logfile, runtime_file=runtime_file, hostfile=hostfile)
    TauDEM.moveoutletstostrm(np, flow_dir, stream_raster, outlet_file, modified_outlet,
                             workingdir, mpi_bin, bin_dir,
                             log_file=logfile, runtime_file=runtime_file, hostfile=hostfile)
    UtilClass.writelog(logfile, '[Output] %d..., %s' %
                       (60, 'Generating stream skeleton...'), 'a')
    TauDEM.peukerdouglas(np, filled_dem, stream_skeleton, workingdir, mpi_bin, bin_dir,
                         log_file=logfile, runtime_file=runtime_file, hostfile=hostfile)
    UtilClass.writelog(logfile, '[Output] %d..., %s' %
                       (70, 'Flow accumulation with outlet...'), 'a')
    tmp_outlet = None
    if singlebasin:
        tmp_outlet = modified_outlet
    TauDEM.aread8(np, flow_dir, acc_with_weight, tmp_outlet, stream_skeleton, False,
                  workingdir, mpi_bin, bin_dir,
                  log_file=logfile, runtime_file=runtime_file, hostfile=hostfile)
    if thresh <= 0:  # find the optimal threshold using dropanalysis function
        UtilClass.writelog(logfile, '[Output] %d..., %s' %
                           (75, 'Drop analysis to select optimal threshold...'), 'a')
        min_accum, max_accum, mean_accum, std_accum = \
            RasterUtilClass.raster_statistics(acc_with_weight)
        if mean_accum - std_accum < 0:
            minthresh = mean_accum
        else:
            minthresh = mean_accum - std_accum
        maxthresh = mean_accum + std_accum
        numthresh = 20
        logspace = 'true'
        drp_file = namecfg.drptxt
        TauDEM.dropanalysis(np, filled_dem, flow_dir, acc_with_weight, acc_with_weight,
                            modified_outlet, minthresh, maxthresh, numthresh, logspace,
                            drp_file, workingdir, mpi_bin, bin_dir,
                            log_file=logfile, runtime_file=runtime_file, hostfile=hostfile)
        if not FileClass.is_file_exists(drp_file):
            raise RuntimeError('Dropanalysis failed and drp.txt was not created!')
        with open(drp_file, 'r', encoding='utf-8') as drpf:
            temp_contents = drpf.read()
            (beg, thresh) = temp_contents.rsplit(' ', 1)
        print(thresh)
    UtilClass.writelog(logfile, '[Output] %d..., %s' % (80, 'Generating stream raster...'), 'a')
    TauDEM.threshold(np, acc_with_weight, stream_raster, float(thresh),
                     workingdir, mpi_bin, bin_dir,
                     log_file=logfile, runtime_file=runtime_file, hostfile=hostfile)
    UtilClass.writelog(logfile, '[Output] %d..., %s' % (90, 'Generating stream net...'), 'a')
    TauDEM.streamnet(np, filled_dem, flow_dir, acc_with_weight, stream_raster,
                     modified_outlet, stream_order, ch_network, ch_coord, stream_net,
                     subbasin, workingdir, mpi_bin, bin_dir,
                     log_file=logfile, runtime_file=runtime_file, hostfile=hostfile)
    UtilClass.writelog(logfile, '[Output] %d..., %s' %
                       (95, 'Calculating distance to stream (D8)...'), 'a')
    TauDEM.d8hdisttostrm(np, flow_dir, stream_raster, dist2_stream_d8, 1,
                         workingdir, mpi_bin, bin_dir,
                         log_file=logfile, runtime_file=runtime_file, hostfile=hostfile)
    UtilClass.writelog(logfile, '[Output] %d..., %s' %
                       (100, 'Original subbasin delineation is finished!'), 'a')
def error(msg, log_file=None):
    """Print, output error message and raise RuntimeError."""
    UtilClass.print_msg(msg + os.linesep)
    if log_file is not None:
        UtilClass.writelog(log_file, msg, 'append')
    raise RuntimeError(msg)