Example #1
File: hydro.py Project: crazyzlj/PyGeoC
 def convert_code(in_file, out_file, in_alg='taudem', out_alg='arcgis', datatype=None):
     """
     Convert D8 flow direction code from one algorithm to another.
     Args:
         in_file: input raster file path
         out_file: output raster file path
         in_alg: available algorithms are in FlowModelConst.d8_dirs. "taudem" is the default
         out_alg: same as in_alg. "arcgis" is the default
         datatype: default is None and use the datatype of the in_file
     """
     FileClass.check_file_exists(in_file)
     in_alg = in_alg.lower()
     out_alg = out_alg.lower()
     if in_alg not in FlowModelConst.d8_dirs or out_alg not in FlowModelConst.d8_dirs:
         raise RuntimeError('The input algorithm name should be one of %s' %
                            ', '.join(list(FlowModelConst.d8_dirs.keys())))
     convert_dict = dict()
     in_code = FlowModelConst.d8_dirs.get(in_alg)
     out_code = FlowModelConst.d8_dirs.get(out_alg)
     assert len(in_code) == len(out_code)
     for i, tmp_in_code in enumerate(in_code):
         convert_dict[tmp_in_code] = out_code[i]
     if datatype is not None and datatype in GDALDataType:
         RasterUtilClass.raster_reclassify(in_file, convert_dict, out_file, datatype)
     else:
         RasterUtilClass.raster_reclassify(in_file, convert_dict, out_file)
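A minimal usage sketch of this function. It assumes the method belongs to the D8Util class in pygeoc.hydro (Example #18 below calls it as D8Util.convert_code); the raster paths are hypothetical.

from pygeoc.hydro import D8Util

# Convert a TauDEM-coded D8 flow direction raster to ArcGIS coding.
# datatype is omitted, so the output inherits the input's data type.
D8Util.convert_code('/data/flow_dir_taudem.tif', '/data/flow_dir_arcgis.tif',
                    in_alg='taudem', out_alg='arcgis')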
Example #2
File: scenario.py Project: crazyzlj/SEIMS
    def calculate_environment(self):
        if not self.modelrun:  # no evaluation has been done yet
            self.economy = self.worst_econ
            self.environment = self.worst_env
            return
        rfile = self.modelout_dir + os.path.sep + self.bmps_info['ENVEVAL']

        if not FileClass.is_file_exists(rfile):
            time.sleep(5)  # sleep 5 seconds to wait for the output
        if not FileClass.is_file_exists(rfile):
            print('WARNING: Although the SEIMS model ran successfully, the desired output: %s'
                  ' cannot be found!' % rfile)
            self.economy = self.worst_econ
            self.environment = self.worst_env
            return

        base_amount = self.bmps_info['BASE_ENV']
        if StringClass.string_match(rfile.split('.')[-1], 'tif'):  # Raster data
            rr = RasterUtilClass.read_raster(rfile)
            soil_erosion_amount = rr.get_sum() / self.timerange  # unit: year
            # reduction rate of soil erosion
            self.environment = (base_amount - soil_erosion_amount) / base_amount
        elif StringClass.string_match(rfile.split('.')[-1], 'txt'):  # Time series data
            sed_sum = read_simulation_from_txt(self.modelout_dir)  # TODO, fix it later, lj
            self.environment = (base_amount - sed_sum) / base_amount
        else:
            self.economy = self.worst_econ
            self.environment = self.worst_env
            return
Example #3
    def add_group_field(shp_file, subbasin_field_name, group_metis_dict):
        """add group information to subbasin ESRI shapefile

        Args:
            shp_file: Subbasin Shapefile
            subbasin_field_name: field name of subbasin
            group_metis_dict: returned by :func:`metis_partition`
        """
        if not group_metis_dict:
            return
        ds_reach = ogr_Open(shp_file, update=True)
        layer_reach = ds_reach.GetLayer(0)
        layer_def = layer_reach.GetLayerDefn()
        icode = layer_def.GetFieldIndex(subbasin_field_name)
        igrp = layer_def.GetFieldIndex(ImportReaches2Mongo._GROUP)
        ikgrp = layer_def.GetFieldIndex(ImportReaches2Mongo._KMETIS)
        ipgrp = layer_def.GetFieldIndex(ImportReaches2Mongo._PMETIS)

        if igrp < 0:
            new_field = ogr_FieldDefn(ImportReaches2Mongo._GROUP, OFTInteger)
            layer_reach.CreateField(new_field)
        if ikgrp < 0:
            new_field = ogr_FieldDefn(ImportReaches2Mongo._KMETIS, OFTInteger)
            layer_reach.CreateField(new_field)
        if ipgrp < 0:
            new_field = ogr_FieldDefn(ImportReaches2Mongo._PMETIS, OFTInteger)
            layer_reach.CreateField(new_field)

        ftmap = dict()
        layer_reach.ResetReading()
        ft = layer_reach.GetNextFeature()
        while ft is not None:
            tmpid = ft.GetFieldAsInteger(icode)
            ftmap[tmpid] = ft
            ft = layer_reach.GetNextFeature()

        groups = group_metis_dict[1]['group']
        for i, n in enumerate(groups):
            for node, d in group_metis_dict.items():
                ftmap[node].SetField(ImportReaches2Mongo._GROUP, n)
                ftmap[node].SetField(ImportReaches2Mongo._KMETIS, d['kmetis'][i])
                ftmap[node].SetField(ImportReaches2Mongo._PMETIS, d['pmetis'][i])
                layer_reach.SetFeature(ftmap[node])
            # copy the reach file to new file
            prefix = os.path.splitext(shp_file)[0]
            dstfile = prefix + "_" + str(n) + ".shp"
            FileClass.copy_files(shp_file, dstfile)

        layer_reach.SyncToDisk()
        ds_reach.Destroy()
        del ds_reach
Example #4
File: raster.py Project: crazyzlj/PyGeoC
    def write_gtiff_file(f_name, n_rows, n_cols, data, geotransform, srs, nodata_value,
                         gdal_type=GDT_Float32):
        """Output Raster to GeoTiff format file.

        Args:
            f_name: output gtiff file name.
            n_rows: Row count.
            n_cols: Col count.
            data: 2D array data.
            geotransform: geographic transformation.
            srs: coordinate system.
            nodata_value: nodata value.
            gdal_type (:obj:`pygeoc.raster.GDALDataType`): output raster data type,
                                                                  GDT_Float32 as default.
        """
        UtilClass.mkdir(os.path.dirname(FileClass.get_file_fullpath(f_name)))
        driver = gdal_GetDriverByName(str('GTiff'))
        try:
            ds = driver.Create(f_name, n_cols, n_rows, 1, gdal_type)
        except Exception:
            print('Cannot create output file %s' % f_name)
            return
        ds.SetGeoTransform(geotransform)
        try:
            ds.SetProjection(srs.ExportToWkt())
        except AttributeError:  # srs is already a WKT string
            ds.SetProjection(srs)
        ds.GetRasterBand(1).SetNoDataValue(nodata_value)
        # if data contains numpy.nan, then replaced by nodata_value
        if isinstance(data, numpy.ndarray) and data.dtype in [numpy.dtype('int'),
                                                              numpy.dtype('float')]:
            data = numpy.where(numpy.isnan(data), nodata_value, data)
        ds.GetRasterBand(1).WriteArray(data)
        ds = None
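A short usage sketch with hypothetical data and paths; RasterUtilClass is the owning class (Example #9 calls RasterUtilClass.write_gtiff_file), and the EPSG code is just an example.

import numpy
from osgeo import osr
from osgeo.gdal import GDT_Float32
from pygeoc.raster import RasterUtilClass

data = numpy.array([[1.0, 2.0], [numpy.nan, 4.0]])  # NaN becomes nodata
geotrans = (437000.0, 30.0, 0.0, 4296000.0, 0.0, -30.0)  # origin and 30 m cells
srs = osr.SpatialReference()
srs.ImportFromEPSG(32650)  # a hypothetical UTM zone
RasterUtilClass.write_gtiff_file('/tmp/demo.tif', 2, 2, data, geotrans,
                                 srs, -9999., GDT_Float32)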
Example #5
def main():
    from preprocess.config import parse_ini_configuration

    seims_cfg = parse_ini_configuration()
    client = ConnectMongoDB(seims_cfg.hostname, seims_cfg.port)
    conn = client.get_conn()
    db_model = conn[seims_cfg.spatial_db]

    spatial_gfs = GridFS(db_model, DBTableNames.gridfs_spatial)

    csv_path = r'C:\z_data\zhongTianShe\model_data_seims\field_scale_params'
    csv_files = FileClass.get_full_filename_by_suffixes(csv_path, ['.csv'])
    field_count = 7419
    prefix = 9999
    # Create mask file
    mask_name = '%d_MASK' % prefix
    mask_array = [[1] * field_count]
    import_array_to_mongodb(spatial_gfs, mask_array, mask_name)

    # Create spatial parameters
    for csv_file in csv_files:
        print('Import %s...' % csv_file)
        param_arrays = read_field_arrays_from_csv(csv_file)
        for key, value in list(param_arrays.items()):
            import_array_to_mongodb(spatial_gfs, value, '%d_%s' % (prefix, key))
Example #6
File: raster.py Project: crazyzlj/PyGeoC
    def write_asc_file(filename, data, xsize, ysize, geotransform, nodata_value):
        """Output Raster to ASCII file.

        Args:
            filename: output ASCII filename.
            data: 2D array data.
            xsize: Col count.
            ysize: Row count.
            geotransform: geographic transformation.
            nodata_value: nodata value.
        """
        UtilClass.mkdir(os.path.dirname(FileClass.get_file_fullpath(filename)))
        header = 'NCOLS %d\n' \
                 'NROWS %d\n' \
                 'XLLCENTER %f\n' \
                 'YLLCENTER %f\n' \
                 'CELLSIZE %f\n' \
                 'NODATA_VALUE %f' % (xsize, ysize, geotransform[0] + 0.5 * geotransform[1],
                                      geotransform[3] - (ysize - 0.5) * geotransform[1],
                                      geotransform[1], nodata_value)

        with open(filename, 'w', encoding='utf-8') as f:
            f.write(header)
            for i in range(0, ysize):
                for j in range(0, xsize):
                    f.write('%s\t' % repr(data[i][j]))
                f.write('\n')
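A matching sketch for the ASCII writer; note that data precedes the column/row counts in the argument list. All values are hypothetical.

import numpy
from pygeoc.raster import RasterUtilClass

data = numpy.full((3, 4), 1.5)  # 3 rows x 4 columns
geotrans = (500000.0, 10.0, 0.0, 3600000.0, 0.0, -10.0)
RasterUtilClass.write_asc_file('/tmp/demo.asc', data, 4, 3, geotrans, -9999.)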
Example #7
    def workflow(cfg, maindb, climdb):
        """
        This function mainly to import measurement data to MongoDB
        data type may include Q (discharge, m3/s), SED (mg/L), tn (mg/L), tp (mg/L), etc.
        the required parameters that defined in configuration file (*.ini)
        """
        if not cfg.use_observed:
            return False
        c_list = climdb.collection_names()
        if not StringClass.string_in_list(DBTableNames.observes, c_list):
            climdb.create_collection(DBTableNames.observes)
        else:
            climdb.drop_collection(DBTableNames.observes)
        if not StringClass.string_in_list(DBTableNames.sites, c_list):
            climdb.create_collection(DBTableNames.sites)
        if not StringClass.string_in_list(DBTableNames.var_desc, c_list):
            climdb.create_collection(DBTableNames.var_desc)

        file_list = FileClass.get_full_filename_by_suffixes(cfg.observe_dir, ['.txt'])
        meas_file_list = []
        site_loc = []
        for fl in file_list:
            if StringClass.is_substring('observed_', fl):
                meas_file_list.append(fl)
            else:
                site_loc.append(fl)
        ImportObservedData.data_from_txt(maindb, climdb, meas_file_list, site_loc,
                                         cfg.spatials.subbsn)
        return True
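The loop above relies on file naming only: any *.txt under cfg.observe_dir whose name contains 'observed_' is treated as measurement data, and every other *.txt as site-location information. A hypothetical directory layout:

# cfg.observe_dir/
#     observed_Q_2012.txt     -> meas_file_list (discharge records)
#     observed_SED_2012.txt   -> meas_file_list (sediment records)
#     sites_location.txt      -> site_loc (station metadata)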
Example #8
File: config.py Project: crazyzlj/SEIMS
def get_psa_config():
    """Parse arguments.
    Returns:
        cf: ConfigParser object of the *.ini file
        mtd: Parameter sensitivity method name; currently 'morris' and 'fast' are supported.
    """
    # define input arguments
    parser = argparse.ArgumentParser(description="Execute parameters sensitivity analysis.")
    parser.add_argument('-ini', type=str, help="Full path of configuration file")
    # add mutually group
    psa_group = parser.add_mutually_exclusive_group()
    psa_group.add_argument('-morris', action='store_true', help='Run Morris Screening method')
    psa_group.add_argument('-fast', action='store_true', help='Run FAST variant-based method')
    # parse arguments
    args = parser.parse_args()
    ini_file = args.ini
    psa_mtd = 'morris'  # Default
    if args.fast:
        psa_mtd = 'fast'
    elif args.morris:
        psa_mtd = 'morris'
    if not FileClass.is_file_exists(ini_file):
        raise ImportError('Configuration file does not exist: %s' % ini_file)
    cf = ConfigParser()
    cf.read(ini_file)
    return cf, psa_mtd
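Since -morris and -fast form a mutually exclusive group, a typical invocation passes the ini path plus at most one method flag; the script name below is hypothetical.

# python run_psa.py -ini /path/to/psa_config.ini -morris
# python run_psa.py -ini /path/to/psa_config.ini -fast
cf, psa_mtd = get_psa_config()  # psa_mtd defaults to 'morris' if no flag is given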
Example #9
 def output_hillslope(method_id):
     """Output hillslope according different stream cell value method."""
     for (tmp_row, tmp_col) in stream_coors:
         tmp_hillslp_ids = DelineateHillslope.cal_hs_codes(max_id,
                                                           stream_data[tmp_row][tmp_col])
         if 0 < method_id < 3:
             hillslope_mtx[tmp_row][tmp_col] = tmp_hillslp_ids[method_id]
             # is head stream cell?
             if (tmp_row, tmp_col) in headstream_coors:
                 hillslope_mtx[tmp_row][tmp_col] = tmp_hillslp_ids[0]
         elif method_id == 3:
             hillslope_mtx[tmp_row][tmp_col] = DEFAULT_NODATA
     # Output to raster file
     hillslope_out_new = hillslope_out
     dirpath = os.path.dirname(hillslope_out_new) + os.path.sep
     corename = FileClass.get_core_name_without_suffix(hillslope_out_new)
     if method_id == 1:
         hillslope_out_new = dirpath + corename + '_right.tif'
     elif method_id == 2:
         hillslope_out_new = dirpath + corename + '_left.tif'
     elif method_id == 3:
         hillslope_out_new = dirpath + corename + '_nodata.tif'
     RasterUtilClass.write_gtiff_file(hillslope_out_new, nrows, ncols,
                                      hillslope_mtx,
                                      geotrans, srs, DEFAULT_NODATA, datatype)
Example #10
 def calculate_sensitivity(self):
     """Calculate Morris elementary effects.
        It is worth to be noticed that evaluate_models() allows to return
        several output variables, hence we should calculate each of them separately.
     """
     if not self.psa_si:
         if FileClass.is_file_exists(self.cfg.outfiles.psa_si_json):
             with open(self.cfg.outfiles.psa_si_json, 'r') as f:
                 self.psa_si = UtilClass.decode_strs_in_dict(json.load(f))
                 return
     if not self.objnames:
         if FileClass.is_file_exists('%s/objnames.pickle' % self.cfg.psa_outpath):
             with open('%s/objnames.pickle' % self.cfg.psa_outpath, 'rb') as f:  # binary mode for pickle
                 self.objnames = pickle.load(f)
     if self.output_values is None or len(self.output_values) == 0:
         self.evaluate_models()
     if self.param_values is None or len(self.param_values) == 0:
         self.generate_samples()
     if not self.param_defs:
         self.read_param_ranges()
     row, col = self.output_values.shape
     assert (row == self.run_count)
     for i in range(col):
         print(self.objnames[i])
         if self.cfg.method == 'morris':
             tmp_Si = morris_alz(self.param_defs,
                                 self.param_values,
                                 self.output_values[:, i],
                                 conf_level=0.95, print_to_console=True,
                                 num_levels=self.cfg.morris.num_levels,
                                 grid_jump=self.cfg.morris.grid_jump)
         elif self.cfg.method == 'fast':
             tmp_Si = fast_alz(self.param_defs, self.output_values[:, i],
                               print_to_console=True)
         else:
             raise ValueError('%s method is not supported now!' % self.cfg.method)
         self.psa_si[i] = tmp_Si
     # Save as json, which can be loaded by json.load()
     json_data = json.dumps(self.psa_si, indent=4, cls=SpecialJsonEncoder)
     with open(self.cfg.outfiles.psa_si_json, 'w') as f:
         f.write(json_data)
     self.output_psa_si()
Example #11
File: TauDEM.py Project: crazyzlj/PyGeoC
 def pitremove(np, dem, filleddem, workingdir=None, mpiexedir=None, exedir=None, log_file=None,
               runtime_file=None, hostfile=None):
     """Run pit remove using the flooding approach """
     fname = TauDEM.func_name('pitremove')
     return TauDEM.run(FileClass.get_executable_fullpath(fname, exedir),
                       {'-z': dem}, workingdir,
                       None,
                       {'-fel': filleddem},
                       {'mpipath': mpiexedir, 'hostfile': hostfile, 'n': np},
                       {'logfile': log_file, 'runtimefile': runtime_file})
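The TauDEM wrappers in this and the following examples share one call pattern: input files, working directory, input parameters, output files, MPI settings, and log settings. A hedged sketch for pit removal with 4 MPI processes (all paths hypothetical):

from pygeoc.TauDEM import TauDEM

TauDEM.pitremove(4, '/data/dem.tif', '/data/dem_filled.tif',
                 workingdir='/data',
                 mpiexedir='/usr/bin',      # directory containing mpiexec
                 exedir='/opt/taudem/bin')  # directory containing pitremove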
Example #12
File: TauDEM.py Project: crazyzlj/PyGeoC
 def dinfflowdir(np, filleddem, flowangle, slope, workingdir=None, mpiexedir=None, exedir=None,
                 log_file=None, runtime_file=None, hostfile=None):
     """Run Dinf flow direction"""
     fname = TauDEM.func_name('dinfflowdir')
     return TauDEM.run(FileClass.get_executable_fullpath(fname, exedir),
                       {'-fel': filleddem}, workingdir,
                       None,
                       {'-ang': flowangle, '-slp': slope},
                       {'mpipath': mpiexedir, 'hostfile': hostfile, 'n': np},
                       {'logfile': log_file, 'runtimefile': runtime_file})
Example #13
File: TauDEM.py Project: crazyzlj/PyGeoC
 def gridnet(np, pfile, plenfile, tlenfile, gordfile, outlet=None, workingdir=None,
             mpiexedir=None, exedir=None, log_file=None, runtime_file=None, hostfile=None):
     """Run gridnet"""
     fname = TauDEM.func_name('gridnet')
     return TauDEM.run(FileClass.get_executable_fullpath(fname, exedir),
                       {'-p': pfile, '-o': outlet}, workingdir,
                       None,
                       {'-plen': plenfile, '-tlen': tlenfile, '-gord': gordfile},
                       {'mpipath': mpiexedir, 'hostfile': hostfile, 'n': np},
                       {'logfile': log_file, 'runtimefile': runtime_file})
Example #14
File: TauDEM.py Project: crazyzlj/PyGeoC
 def threshold(np, acc, stream_raster, threshold=100., workingdir=None,
               mpiexedir=None, exedir=None, log_file=None, runtime_file=None, hostfile=None):
     """Run threshold for stream raster"""
     fname = TauDEM.func_name('threshold')
     return TauDEM.run(FileClass.get_executable_fullpath(fname, exedir),
                       {'-ssa': acc}, workingdir,
                       {'-thresh': threshold},
                       {'-src': stream_raster},
                       {'mpipath': mpiexedir, 'hostfile': hostfile, 'n': np},
                       {'logfile': log_file, 'runtimefile': runtime_file})
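For instance, extracting a stream raster from a D8 contributing-area grid with a drainage threshold of 500 cells might look like this (paths hypothetical):

from pygeoc.TauDEM import TauDEM

TauDEM.threshold(4, '/data/acc_d8.tif', '/data/stream.tif', threshold=500.,
                 workingdir='/data', exedir='/opt/taudem/bin')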
Example #15
File: TauDEM.py Project: crazyzlj/PyGeoC
 def peukerdouglas(np, fel, streamSkeleton, workingdir=None, mpiexedir=None, exedir=None,
                   log_file=None, runtime_file=None, hostfile=None):
     """Run peuker-douglas function"""
     fname = TauDEM.func_name('peukerdouglas')
     return TauDEM.run(FileClass.get_executable_fullpath(fname, exedir),
                       {'-fel': fel}, workingdir,
                       None,
                       {'-ss': streamSkeleton},
                       {'mpipath': mpiexedir, 'hostfile': hostfile, 'n': np},
                       {'logfile': log_file, 'runtimefile': runtime_file})
Example #16
    def read_crop_lookup_table(crop_lookup_file):
        """read crop lookup table"""
        FileClass.check_file_exists(crop_lookup_file)
        data_items = read_data_items_from_txt(crop_lookup_file)
        attr_dic = dict()
        fields = data_items[0]
        n = len(fields)
        for i in range(n):
            attr_dic[fields[i]] = dict()
        for items in data_items[1:]:
            cur_id = int(items[0])

            for i in range(n):
                dic = attr_dic[fields[i]]
                try:
                    dic[cur_id] = float(items[i])
                except ValueError:
                    dic[cur_id] = items[i]
        return attr_dic
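Because cur_id = int(items[0]), the first column must be a numeric ID; the remaining columns may mix numbers and text. A hypothetical input file and the resulting access pattern (the method is called as a plain function here for brevity):

# crop_lookup.txt (hypothetical contents):
#     ICNUM   CPNM     BIO_E   HVSTI
#     1       CORN     39.0    0.50
#     2       SOYBEAN  25.0    0.31
attr_dic = read_crop_lookup_table('/data/crop_lookup.txt')
print(attr_dic['BIO_E'][1])  # -> 39.0
print(attr_dic['CPNM'][2])   # -> 'SOYBEAN'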
Example #17
File: vector.py Project: crazyzlj/PyGeoC
 def raster2shp(rasterfile, vectorshp, layername=None, fieldname=None,
                band_num=1, mask='default'):
     """Convert raster to ESRI shapefile"""
     FileClass.remove_files(vectorshp)
     FileClass.check_file_exists(rasterfile)
     # this allows GDAL to throw Python Exceptions
     gdal.UseExceptions()
     src_ds = gdal.Open(rasterfile)
     if src_ds is None:
         print('Unable to open %s' % rasterfile)
         sys.exit(1)
     try:
         srcband = src_ds.GetRasterBand(band_num)
     except RuntimeError as e:
         # for example, try GetRasterBand(10)
         print('Band ( %i ) not found, %s' % (band_num, e))
         sys.exit(1)
     if mask == 'default':
         maskband = srcband.GetMaskBand()
     elif mask is None or mask.upper() == 'NONE':
         maskband = None
     else:
         mask_ds = gdal.Open(mask)
         maskband = mask_ds.GetRasterBand(1)
     #  create output datasource
     if layername is None:
         layername = FileClass.get_core_name_without_suffix(rasterfile)
     drv = ogr_GetDriverByName(str('ESRI Shapefile'))
     dst_ds = drv.CreateDataSource(vectorshp)
     srs = None
     if src_ds.GetProjection() != '':
         srs = osr_SpatialReference()
         srs.ImportFromWkt(src_ds.GetProjection())
     dst_layer = dst_ds.CreateLayer(str(layername), srs=srs)
     if fieldname is None:
         fieldname = layername.upper()
     fd = ogr_FieldDefn(str(fieldname), OFTInteger)
     dst_layer.CreateField(fd)
     dst_field = 0
     result = gdal.Polygonize(srcband, maskband, dst_layer, dst_field,
                              ['8CONNECTED=8'], callback=None)
     return result
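A usage sketch mirroring the call in Example #18; the paths and field name are hypothetical.

from pygeoc.vector import VectorUtilClass

# Polygonize a subbasin raster into an ESRI shapefile; the integer field
# defaults to the upper-cased layer name unless fieldname is given.
VectorUtilClass.raster2shp('/data/subbasin.tif', '/data/subbasin.shp',
                           layername='subbasin', fieldname='SUBBASINID')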
Example #18
    def post_process_of_delineated_data(cfg):
        """Do some necessary transfer for subbasin, stream, and flow direction raster."""
        # inputs
        stream_net_file = cfg.taudems.streamnet_shp
        subbasin_file = cfg.taudems.subbsn_m
        flow_dir_file_tau = cfg.taudems.d8flow_m
        stream_raster_file = cfg.taudems.stream_m
        # outputs
        # -- shapefile
        shp_dir = cfg.dirs.geoshp
        UtilClass.mkdir(shp_dir)
        # ---- outlet, copy from DirNameUtils.TauDEM
        FileClass.copy_files(cfg.taudems.outlet_m, cfg.vecs.outlet)
        # ---- reaches
        output_reach_file = cfg.vecs.reach
        # ---- subbasins
        subbasin_vector_file = cfg.vecs.subbsn
        # -- raster file
        output_subbasin_file = cfg.spatials.subbsn
        output_flow_dir_file = cfg.spatials.d8flow
        output_stream_link_file = cfg.spatials.stream_link
        output_hillslope_file = cfg.spatials.hillslope

        id_map = StreamnetUtil.serialize_streamnet(stream_net_file, output_reach_file)
        RasterUtilClass.raster_reclassify(subbasin_file, id_map, output_subbasin_file, GDT_Int32)
        StreamnetUtil.assign_stream_id_raster(stream_raster_file, output_subbasin_file,
                                              output_stream_link_file)

        # Convert D8 encoding rule to ArcGIS
        D8Util.convert_code(flow_dir_file_tau, output_flow_dir_file)

        # convert raster to shapefile (for subbasin and basin)
        print('Generating subbasin vector...')
        VectorUtilClass.raster2shp(output_subbasin_file, subbasin_vector_file, 'subbasin',
                                   FieldNames.subbasin_id)
        mask_file = cfg.spatials.mask
        basin_vector = cfg.vecs.bsn
        print('Generating basin vector...')
        VectorUtilClass.raster2shp(mask_file, basin_vector, 'basin', FieldNames.basin)
        # delineate hillslope
        DelineateHillslope.downstream_method_whitebox(output_stream_link_file, flow_dir_file_tau,
                                                      output_hillslope_file)
Example #19
File: config.py Project: crazyzlj/SEIMS
    def __init__(self, cf, method='morris'):
        """Initialization."""
        self.method = method
        # 1. SEIMS model related
        self.model = ParseSEIMSConfig(cf)
        # 2. Common settings of parameters sensitivity analysis
        if 'PSA_Settings' not in cf.sections():
            raise ValueError("[PSA_Settings] section MUST be existed in *.ini file.")

        self.evaluate_params = list()
        if cf.has_option('PSA_Settings', 'evaluateparam'):
            eva_str = cf.get('PSA_Settings', 'evaluateparam')
            self.evaluate_params = StringClass.split_string(eva_str, ',')
        else:
            self.evaluate_params = ['Q']  # Default

        self.param_range_def = 'morris_param_rng.def'  # Default
        if cf.has_option('PSA_Settings', 'paramrngdef'):
            self.param_range_def = cf.get('PSA_Settings', 'paramrngdef')
        self.param_range_def = self.model.model_dir + os.path.sep + self.param_range_def
        if not FileClass.is_file_exists(self.param_range_def):
            raise IOError('Ranges of parameters MUST be provided!')

        if not (cf.has_option('PSA_Settings', 'psa_time_start') and
                cf.has_option('PSA_Settings', 'psa_time_end')):
            raise ValueError("Start and end time of PSA MUST be specified in [PSA_Settings].")
        try:
            # UTCTIME
            tstart = cf.get('PSA_Settings', 'psa_time_start')
            tend = cf.get('PSA_Settings', 'psa_time_end')
            self.psa_stime = StringClass.get_datetime(tstart)
            self.psa_etime = StringClass.get_datetime(tend)
        except ValueError:
            raise ValueError('The time format MUST be "YYYY-MM-DD HH:MM:SS".')
        if self.psa_stime >= self.psa_etime:
            raise ValueError("Wrong time settings in [PSA_Settings]!")

        # 3. Parameters settings for specific sensitivity analysis methods
        self.morris = None
        self.fast = None
        if self.method == 'fast':
            self.fast = FASTConfig(cf)
            self.psa_outpath = '%s/PSA-FAST-N%dM%d' % (self.model.model_dir,
                                                       self.fast.N, self.fast.M)
        elif self.method == 'morris':
            self.morris = MorrisConfig(cf)
            self.psa_outpath = '%s/PSA-Morris-N%dL%dJ%d' % (self.model.model_dir,
                                                            self.morris.N,
                                                            self.morris.num_levels,
                                                            self.morris.grid_jump)

        # Do not remove psa_outpath if it already exists
        UtilClass.mkdir(self.psa_outpath)
        self.outfiles = PSAOutputs(self.psa_outpath)
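A hedged sketch of the matching [PSA_Settings] block in the *.ini file. The section and option names follow the cf.has_option()/cf.get() calls above; all values are hypothetical.

[PSA_Settings]
evaluateparam = Q,SED
paramrngdef = morris_param_rng.def
psa_time_start = 2012-01-01 00:00:00
psa_time_end = 2012-12-31 23:59:59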
Example #20
File: run_seims.py Project: crazyzlj/SEIMS
    def __init__(self, bin_dir='', model_dir='', nthread=4, lyrmtd=0,
                 host='127.0.0.1', port=27017, scenario_id=-1, calibration_id=-1,
                 version='OMP', nprocess=1, mpi_bin='', hosts_opt='-f', hostfile='',
                 **kwargs):  # Allow any other keyword arguments
        #  Derived from input arguments
        args_dict = dict()
        if 'args_dict' in kwargs:  # Preferred to use 'args_dict' if existed.
            args_dict = kwargs['args_dict']
        bin_dir = args_dict['bin_dir'] if 'bin_dir' in args_dict else bin_dir
        model_dir = args_dict['model_dir'] if 'model_dir' in args_dict else model_dir
        self.version = args_dict['version'] if 'version' in args_dict else version
        suffix = '.exe' if sysstr == 'Windows' else ''
        if self.version == 'MPI':
            self.seims_exec = bin_dir + os.path.sep + 'seims_mpi' + suffix
        else:
            self.seims_exec = bin_dir + os.path.sep + 'seims_omp' + suffix
            if not FileClass.is_file_exists(self.seims_exec):  # If OpenMP is not supported, use `seims`!
                self.seims_exec = bin_dir + os.path.sep + 'seims' + suffix
        self.seims_exec = os.path.abspath(self.seims_exec)
        self.model_dir = os.path.abspath(model_dir)

        self.nthread = args_dict['nthread'] if 'nthread' in args_dict else nthread
        self.lyrmtd = args_dict['lyrmtd'] if 'lyrmtd' in args_dict else lyrmtd
        self.host = args_dict['host'] if 'host' in args_dict else host
        self.port = args_dict['port'] if 'port' in args_dict else port
        self.scenario_id = args_dict['scenario_id'] if 'scenario_id' in args_dict else scenario_id
        self.calibration_id = args_dict[
            'calibration_id'] if 'calibration_id' in args_dict else calibration_id
        self.nprocess = args_dict['nprocess'] if 'nprocess' in args_dict else nprocess
        self.mpi_bin = args_dict['mpi_bin'] if 'mpi_bin' in args_dict else mpi_bin
        self.hosts_opt = args_dict['hosts_opt'] if 'hosts_opt' in args_dict else hosts_opt
        self.hostfile = args_dict['hostfile'] if 'hostfile' in args_dict else hostfile

        # Concatenate executable command
        self.cmd = self.Command
        self.run_success = False
        self.output_dir = self.OutputDirectory
        # Read model data from MongoDB
        self.db_name = os.path.split(self.model_dir)[1]
        self.outlet_id = self.OutletID
        self.start_time, self.end_time = self.SimulatedPeriod
        # Data maybe used after model run
        self.timespan = dict()
        self.obs_vars = list()  # Observation types at the outlet
        self.obs_value = dict()  # Observation value, key: DATETIME, value: value list of obs_vars
        self.sim_vars = list()  # Simulation types at the outlet, which is part of obs_vars
        self.sim_value = dict()  # Simulation value, same as obs_value
        # The format of sim_obs_dict:
        #         {VarName: {'UTCDATETIME': [t1, t2, ..., tn],
        #                    'Obs': [o1, o2, ..., on],
        #                    'Sim': [s1, s2, ..., sn]},
        #         ...
        #         }
        self.sim_obs_dict = dict()
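A construction sketch, assuming this __init__ belongs to the MainSEIMS class of run_seims.py; the paths and IDs are hypothetical.

model = MainSEIMS(bin_dir='/opt/SEIMS/bin', model_dir='/data/demo_model',
                  nthread=4, host='127.0.0.1', port=27017, scenario_id=0)
# Equivalent call using the preferred single-dict form:
model = MainSEIMS(args_dict={'bin_dir': '/opt/SEIMS/bin',
                             'model_dir': '/data/demo_model',
                             'nthread': 4, 'scenario_id': 0})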
Example #21
File: TauDEM.py Project: crazyzlj/PyGeoC
 def d8hdisttostrm(np, p, src, dist, thresh, workingdir=None,
                   mpiexedir=None, exedir=None, log_file=None, runtime_file=None, hostfile=None):
     """Run D8 horizontal distance down to stream.
     """
     fname = TauDEM.func_name('d8hdisttostrm')
     return TauDEM.run(FileClass.get_executable_fullpath(fname, exedir),
                       {'-p': p, '-src': src},
                       workingdir,
                       {'-thresh': thresh},
                       {'-dist': dist},
                       {'mpipath': mpiexedir, 'hostfile': hostfile, 'n': np},
                       {'logfile': log_file, 'runtimefile': runtime_file})
Example #22
File: TauDEM.py Project: crazyzlj/PyGeoC
 def moveoutletstostrm(np, flowdir, streamRaster, outlet, modifiedOutlet,
                       workingdir=None, mpiexedir=None,
                       exedir=None, log_file=None, runtime_file=None, hostfile=None):
     """Run move the given outlets to stream"""
     fname = TauDEM.func_name('moveoutletstostrm')
     return TauDEM.run(FileClass.get_executable_fullpath(fname, exedir),
                       {'-p': flowdir, '-src': streamRaster, '-o': outlet},
                       workingdir,
                       None,
                       {'-om': modifiedOutlet},
                       {'mpipath': mpiexedir, 'hostfile': hostfile, 'n': np},
                       {'logfile': log_file, 'runtimefile': runtime_file})
Example #23
File: vector.py Project: crazyzlj/PyGeoC
 def convert2geojson(jsonfile, src_srs, dst_srs, src_file):
     """convert shapefile to geojson file"""
     if os.path.exists(jsonfile):
         os.remove(jsonfile)
     if sysstr == 'Windows':
         exepath = '"%s/Lib/site-packages/osgeo/ogr2ogr"' % sys.exec_prefix
     else:
         exepath = FileClass.get_executable_fullpath('ogr2ogr')
      s = '%s -f GeoJSON -s_srs "%s" -t_srs "%s" %s %s' % (
          exepath, src_srs, dst_srs, jsonfile, src_file)
     UtilClass.run_command(s)
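A usage sketch, assuming the method belongs to VectorUtilClass like raster2shp above; the EPSG codes and paths are hypothetical.

from pygeoc.vector import VectorUtilClass

# Reproject a UTM shapefile to a WGS84 GeoJSON file via ogr2ogr.
VectorUtilClass.convert2geojson('/data/subbasin.geojson', 'EPSG:32650',
                                'EPSG:4326', '/data/subbasin.shp')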
Example #24
File: TauDEM.py Project: crazyzlj/PyGeoC
 def streamnet(np, filleddem, flowdir, acc, streamRaster, modifiedOutlet,
               streamOrder, chNetwork, chCoord, streamNet, subbasin, workingdir=None,
               mpiexedir=None, exedir=None, log_file=None, runtime_file=None, hostfile=None):
     """Run streamnet"""
     fname = TauDEM.func_name('streamnet')
     return TauDEM.run(FileClass.get_executable_fullpath(fname, exedir),
                       {'-fel': filleddem, '-p': flowdir, '-ad8': acc, '-src': streamRaster,
                        '-o': modifiedOutlet}, workingdir,
                       None,
                       {'-ord': streamOrder, '-tree': chNetwork, '-coord': chCoord,
                        '-net': streamNet, '-w': subbasin},
                       {'mpipath': mpiexedir, 'hostfile': hostfile, 'n': np},
                       {'logfile': log_file, 'runtimefile': runtime_file})
Example #25
File: TauDEM.py Project: crazyzlj/PyGeoC
 def areadinf(np, angfile, sca, outlet=None, wg=None, edgecontaimination=False,
              workingdir=None, mpiexedir=None, exedir=None,
              log_file=None, runtime_file=None, hostfile=None):
     """Run Accumulate area according to Dinf flow direction"""
     # -nc means do not consider edge contaimination
     if edgecontaimination:
         in_params = {'-nc': None}
     else:
         in_params = None
     fname = TauDEM.func_name('areadinf')
     return TauDEM.run(FileClass.get_executable_fullpath(fname, exedir),
                       {'-ang': angfile, '-o': outlet, '-wg': wg}, workingdir,
                       in_params,
                       {'-sca': sca},
                       {'mpipath': mpiexedir, 'hostfile': hostfile, 'n': np},
                       {'logfile': log_file, 'runtimefile': runtime_file})
Example #26
File: TauDEM.py Project: crazyzlj/PyGeoC
 def dinfdistdown(np, ang, fel, slp, src, statsm, distm, edgecontamination, wg, dist,
                  workingdir=None, mpiexedir=None, exedir=None,
                  log_file=None, runtime_file=None, hostfile=None):
     """Run D-inf distance down to stream"""
     in_params = {'-m': '%s %s' % (TauDEM.convertstatsmethod(statsm),
                                   TauDEM.convertdistmethod(distm))}
     if StringClass.string_match(edgecontamination, 'false') or edgecontamination is False:
         in_params['-nc'] = None
     fname = TauDEM.func_name('dinfdistdown')
     return TauDEM.run(FileClass.get_executable_fullpath(fname, exedir),
                       {'-fel': fel, '-slp': slp, '-ang': ang, '-src': src, '-wg': wg},
                       workingdir,
                       in_params,
                       {'-dd': dist},
                       {'mpipath': mpiexedir, 'hostfile': hostfile, 'n': np},
                       {'logfile': log_file, 'runtimefile': runtime_file})
Example #27
File: TauDEM.py Project: crazyzlj/PyGeoC
 def dropanalysis(np, fel, p, ad8, ssa, outlet, minthresh, maxthresh, numthresh,
                  logspace, drp, workingdir=None,
                  mpiexedir=None, exedir=None, log_file=None, runtime_file=None, hostfile=None):
     """Drop analysis for optimal threshold for extracting stream."""
     parstr = '%f %f %f' % (minthresh, maxthresh, numthresh)
     if logspace == 'false':
         parstr += ' 1'
     else:
         parstr += ' 0'
     fname = TauDEM.func_name('dropanalysis')
     return TauDEM.run(FileClass.get_executable_fullpath(fname, exedir),
                       {'-fel': fel, '-p': p, '-ad8': ad8, '-ssa': ssa, '-o': outlet},
                       workingdir,
                       {'-par': parstr},
                       {'-drp': drp},
                       {'mpipath': mpiexedir, 'hostfile': hostfile, 'n': np},
                       {'logfile': log_file, 'runtimefile': runtime_file})
Example #28
File: TauDEM.py Project: crazyzlj/PyGeoC
 def connectdown(np, p, acc, outlet, wtsd=None, workingdir=None, mpiexedir=None,
                 exedir=None, log_file=None, runtime_file=None, hostfile=None):
     """Reads an ad8 contributing area file,
     identifies the location of the largest ad8 value as the outlet of the largest watershed"""
     # If watershed is not specified, use acc to generate a mask layer.
     if wtsd is None or not os.path.isfile(wtsd):
         p, workingdir = TauDEM.check_infile_and_wp(p, workingdir)
         wtsd = workingdir + os.sep + 'wtsd_default.tif'
         RasterUtilClass.get_mask_from_raster(p, wtsd, True)
     fname = TauDEM.func_name('connectdown')
     return TauDEM.run(FileClass.get_executable_fullpath(fname, exedir),
                       {'-p': p, '-ad8': acc, '-w': wtsd},
                       workingdir,
                       None,
                       {'-o': outlet},
                       {'mpipath': mpiexedir, 'hostfile': hostfile, 'n': np},
                       {'logfile': log_file, 'runtimefile': runtime_file})
Example #29
File: calibrate.py Project: crazyzlj/SEIMS
    def ParamDefs(self):
        """Read cali_param_rng.def file

           name,lower_bound,upper_bound

            e.g.,
             Param1,0,1
             Param2,0.5,1.2
             Param3,-1.0,1.0

        Returns:
            a dictionary containing:
            - names - the names of the parameters
            - bounds - a list of lists of lower and upper bounds
            - num_vars - a scalar indicating the number of variables
                         (the length of names)
        """
        # return the cached parameter definitions if already read
        if self.param_defs:
            return self.param_defs
        # read param_range_def file and output to json file
        client = ConnectMongoDB(self.cfg.model.host, self.cfg.model.port)
        conn = client.get_conn()
        db = conn[self.cfg.model.db_name]
        collection = db['PARAMETERS']

        names = list()
        bounds = list()
        num_vars = 0
        if not FileClass.is_file_exists(self.cfg.param_range_def):
            raise ValueError('Parameters definition file %s does not'
                             ' exist!' % self.cfg.param_range_def)
        items = read_data_items_from_txt(self.cfg.param_range_def)
        for item in items:
            if len(item) < 3:
                continue
            # find the parameter name; print a warning if it does not exist
            cursor = collection.find({'NAME': item[0]}, no_cursor_timeout=True)
            if not cursor.count():
                print('WARNING: parameter %s does not exist!' % item[0])
                continue
            num_vars += 1
            names.append(item[0])
            bounds.append([float(item[1]), float(item[2])])
        self.param_defs = {'names': names, 'bounds': bounds, 'num_vars': num_vars}
        return self.param_defs
Example #30
    def mask_origin_delineated_data(cfg):
        """Mask the original delineated data by Subbasin raster."""
        subbasin_tau_file = cfg.taudems.subbsn
        geodata2dbdir = cfg.dirs.geodata2db
        UtilClass.mkdir(geodata2dbdir)
        mask_file = cfg.spatials.mask
        RasterUtilClass.get_mask_from_raster(subbasin_tau_file, mask_file)
        # Total 12 raster files
        original_files = [cfg.taudems.subbsn, cfg.taudems.d8flow, cfg.taudems.stream_raster,
                          cfg.taudems.slp, cfg.taudems.filldem, cfg.taudems.d8acc,
                          cfg.taudems.stream_order, cfg.taudems.dinf, cfg.taudems.dinf_d8dir,
                          cfg.taudems.dinf_slp, cfg.taudems.dinf_weight,
                          cfg.taudems.dist2stream_d8]
        # output masked files
        output_files = [cfg.taudems.subbsn_m, cfg.taudems.d8flow_m, cfg.taudems.stream_m,
                        cfg.spatials.slope, cfg.spatials.filldem, cfg.spatials.d8acc,
                        cfg.spatials.stream_order, cfg.spatials.dinf, cfg.spatials.dinf_d8dir,
                        cfg.spatials.dinf_slp, cfg.spatials.dinf_weight,
                        cfg.spatials.dist2stream_d8]

        default_values = list()
        for i in range(len(original_files)):
            default_values.append(DEFAULT_NODATA)

        # other input rasters need to be masked
        # soil and landuse
        FileClass.check_file_exists(cfg.soil)
        FileClass.check_file_exists(cfg.landuse)
        original_files.append(cfg.soil)
        output_files.append(cfg.spatials.soil_type)
        default_values.append(cfg.default_soil)
        original_files.append(cfg.landuse)
        output_files.append(cfg.spatials.landuse)
        default_values.append(cfg.default_landuse)

        # Additional raster file
        for k, v in cfg.additional_rs.items():
            org_v = v
            if not FileClass.is_file_exists(org_v):
                v = cfg.spatial_dir + os.path.sep + org_v
                if not FileClass.is_file_exists(v):
                    print('WARNING: The additional file %s MUST be located in '
                          'SPATIAL_DATA_DIR, or provided as full file path!' % k)
                    continue
            original_files.append(v)
            output_files.append(cfg.dirs.geodata2db + os.path.sep + k + '.tif')
            default_values.append(DEFAULT_NODATA)

        config_file = cfg.logs.mask_cfg
        # run mask operation
        print('Mask original delineated data by Subbasin raster...')
        SpatialDelineation.mask_raster_cpp(cfg.seims_bin, mask_file, original_files,
                                           output_files, default_values, config_file)
Example #31
def interpolate_observed_data_to_regular_interval(in_file,
                                                  time_interval,
                                                  start_time,
                                                  end_time,
                                                  eliminate_zero=False,
                                                  time_sys_output='UTCTIME',
                                                  day_divided_hour=0):
    """
    Interpolate irregularly observed data to regular time-interval data.
    Args:
        in_file: input data file, the basic format is as follows:
                 line 1: #<time_system> [<time_zone>], e.g., #LOCALTIME 8, #UTCTIME
                 line 2: DATETIME,field1,field2,...
                 line 3: YYYY-mm-dd HH:MM:SS,field1_value,field2_value,...
                 line 4: ...
                 ...
                 Field names can be PCP, FLOW, or SED;
                 the units are mm/h, m3/s, and g/L (i.e., kg/m3), respectively.
        time_interval: time interval, unit is minute, e.g., daily output is 1440
        start_time: start time, the format must be 'YYYY-mm-dd HH:MM:SS', and the time system
                    is based on time_sys.
        end_time: end time, see also start_time.
        eliminate_zero: Boolean flag. If true, the time interval without original records will
                        not be output.
        time_sys_output: time system of output time_system, the format must be
                  '<time_system> [<time_zone>]', e.g.,
                  'LOCALTIME'
                  'LOCALTIME 8'
                  'UTCTIME' (default)
        day_divided_hour: If the time_interval is equal to N*1440, this parameter should be
                          carefully specified. The value must range from 0 to 23. e.g.,
                          day_divided_hour ==> day ranges (all expressed as 2013-02-03)
                          0  ==> 2013-02-03 00:00:00 to 2013-02-03 23:59:59 (default)
                          8  ==> 2013-02-03 08:00:00 to 2013-02-04 07:59:59
                          20 ==> 2013-02-03 20:00:00 to 2013-02-04 19:59:59
    Returns:
        The output data files are located in the same directory with the input file.
        The nomenclature is: <field name>_<time system>_<time interval>_<nonzero>, e.g.,
        pcp_utctime_1440_nonzero.txt, flow_localtime_60.txt
    """
    FileClass.check_file_exists(in_file)
    time_sys_input, time_zone_input = HydroClimateUtilClass.get_time_system_from_data_file(
        in_file)
    data_items = read_data_items_from_txt(in_file)
    flds = data_items[0][:]
    data_items.remove(flds)
    if not 0 <= day_divided_hour <= 23:
        raise ValueError('Day divided hour must range from 0 to 23!')
    try:
        date_idx = flds.index('DATETIME')
        flds.remove('DATETIME')
    except ValueError:
        raise ValueError('DATETIME must be one of the fields!')
    # available field
    available_flds = ['FLOW', 'SED', 'PCP']

    def check_avaiable_field(cur_fld):
        """Check if the given field name is supported."""
        support_flag = False
        for fff in available_flds:
            if fff.lower() in cur_fld.lower():
                support_flag = True
                break
        return support_flag

    ord_data = OrderedDict()
    time_zone_output = time.timezone / -3600
    if time_sys_output.lower().find('local') >= 0:
        tmpstrs = StringClass.split_string(time_sys_output, [' '])
        if len(tmpstrs) == 2 and MathClass.isnumerical(tmpstrs[1]):
            time_zone_output = int(tmpstrs[1])
        time_sys_output = 'LOCALTIME'
    else:
        time_sys_output = 'UTCTIME'
        time_zone_output = 0
    for item in data_items:
        org_datetime = StringClass.get_datetime(item[date_idx])
        if time_sys_input == 'LOCALTIME':
            org_datetime -= timedelta(hours=time_zone_input)
        # now, org_datetime is UTC time.
        if time_sys_output == 'LOCALTIME':
            org_datetime += timedelta(hours=time_zone_output)
        # now, org_datetime is consistent with the output time system
        ord_data[org_datetime] = list()
        for i, v in enumerate(item):
            if i == date_idx:
                continue
            if MathClass.isnumerical(v):
                ord_data[org_datetime].append(float(v))
            else:
                ord_data[org_datetime].append(v)
    itp_data = OrderedDict()
    out_time_delta = timedelta(minutes=time_interval)
    sdatetime = StringClass.get_datetime(start_time)
    edatetime = StringClass.get_datetime(end_time)
    item_dtime = sdatetime
    if time_interval % 1440 == 0:
        item_dtime = sdatetime.replace(hour=0, minute=0, second=0) + \
                     timedelta(minutes=day_divided_hour * 60)
    while item_dtime <= edatetime:
        sdt = item_dtime  # start datetime of records
        edt = item_dtime + out_time_delta  # end datetime of records
        # get original data items
        org_items = list()
        pre_dt = list(ord_data.keys())[0]
        pre_added = False
        for i, v in list(ord_data.items()):
            if sdt <= i < edt:
                if not pre_added and pre_dt < sdt < i and sdt - pre_dt < out_time_delta:
                    # only add one item that less than sdt.
                    org_items.append([pre_dt] + ord_data.get(pre_dt))
                    pre_added = True
                org_items.append([i] + v)
            if i > edt:
                break
            pre_dt = i
        if len(org_items) > 0:
            org_items.append([edt])  # append the end time for computational convenience
            if org_items[0][0] < sdt:
                org_items[0][0] = sdt  # set the begin datetime of the current interval
        # initialize the interpolated list; intervals without original
        # records are dropped below when eliminate_zero is set
        itp_data[item_dtime] = [0.] * len(flds)
        if len(org_items) == 0:
            if eliminate_zero:
                itp_data.popitem()
            item_dtime += out_time_delta
            continue
        # core interpolation code
        flow_idx = -1
        for v_idx, v_name in enumerate(flds):
            if not check_avaiable_field(v_name):
                continue
            if 'SED' in v_name.upper():  # FLOW must also be present
                for v_idx2, v_name2 in enumerate(flds):
                    if 'FLOW' in v_name2.upper():
                        flow_idx = v_idx2
                        break
                if flow_idx < 0:
                    raise RuntimeError('To interpolate SED, FLOW must be provided!')
        for v_idx, v_name in enumerate(flds):
            if not check_avaiable_field(v_name):
                continue
            itp_value = 0.
            itp_auxiliary_value = 0.
            for org_item_idx, org_item_dtv in enumerate(org_items):
                if org_item_idx == 0:
                    continue
                org_item_dt = org_item_dtv[0]
                pre_item_dtv = org_items[org_item_idx - 1]
                pre_item_dt = pre_item_dtv[0]
                tmp_delta_dt = org_item_dt - pre_item_dt
                tmp_delta_secs = tmp_delta_dt.days * 86400 + tmp_delta_dt.seconds
                if 'SED' in v_name.upper():
                    itp_value += pre_item_dtv[v_idx + 1] * pre_item_dtv[flow_idx + 1] * \
                                 tmp_delta_secs
                    itp_auxiliary_value += pre_item_dtv[flow_idx + 1] * tmp_delta_secs
                else:
                    itp_value += pre_item_dtv[v_idx + 1] * tmp_delta_secs
            if 'SED' in v_name.upper():
                if MathClass.floatequal(itp_auxiliary_value, 0.):
                    itp_value = 0.
                    print('WARNING: Flow is 0 for %s, please check!' %
                          item_dtime.strftime('%Y-%m-%d %H:%M:%S'))
                itp_value /= itp_auxiliary_value
            elif 'FLOW' in v_name.upper():
                itp_value /= (out_time_delta.days * 86400 + out_time_delta.seconds)
            elif 'PCP' in v_name.upper():  # the input is mm/h, and the output is mm
                itp_value /= 3600.
            itp_data[item_dtime][v_idx] = round(itp_value, 4)
        item_dtime += out_time_delta

    # output to files
    work_path = os.path.dirname(in_file)
    header_str = '#' + time_sys_output
    if time_sys_output == 'LOCALTIME':
        header_str = header_str + ' ' + str(time_zone_output)
    for idx, fld in enumerate(flds):
        if not check_avaiable_field(fld):
            continue
        file_name = fld + '_' + time_sys_output + '_' + str(time_interval)
        if eliminate_zero:
            file_name += '_nonzero'
        file_name += '.txt'
        out_file = work_path + os.path.sep + file_name
        with open(out_file, 'w') as f:
            f.write(header_str + '\n')
            f.write('DATETIME,' + fld + '\n')
            for i, v in list(itp_data.items()):
                cur_line = i.strftime('%Y-%m-%d %H:%M:%S') + ',' + str(v[idx]) + '\n'
                f.write(cur_line)
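A hedged end-to-end sketch: a hypothetical input file and the call that aggregates it to daily (1440-minute) values in the UTC time system.

# observed_flow.txt (hypothetical contents):
#     #UTCTIME
#     DATETIME,FLOW
#     2013-02-03 08:30:00,1.23
#     2013-02-03 14:00:00,2.05
interpolate_observed_data_to_regular_interval('/data/observed_flow.txt', 1440,
                                              '2013-02-01 00:00:00',
                                              '2013-02-28 23:59:59',
                                              time_sys_output='UTCTIME')
# -> writes /data/FLOW_UTCTIME_1440.txt next to the input file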
Example #32
    def scenario_from_texts(cfg, main_db, scenario_db):
        """Import BMPs Scenario data to MongoDB
        Args:
            cfg: SEIMS configuration object
            main_db: climate database
            scenario_db: scenario database
        Returns:
            False if failed, otherwise True.
        """
        if not cfg.use_scernario:
            return False
        print('Import BMP Scenario Data... ')
        bmp_files = FileClass.get_filename_by_suffixes(cfg.scenario_dir,
                                                       ['.txt', '.csv'])
        bmp_tabs = list()
        bmp_tabs_path = list()
        for f in bmp_files:
            bmp_tabs.append(f.split('.')[0])
            bmp_tabs_path.append(cfg.scenario_dir + os.path.sep + f)

        # initialize if collection not existed
        c_list = scenario_db.collection_names()
        for item in bmp_tabs:
            if not StringClass.string_in_list(item.upper(), c_list):
                scenario_db.create_collection(item.upper())
            else:
                scenario_db.drop_collection(item.upper())
        # Read subbasin.tif and dist2Stream.tif
        subbasin_r = RasterUtilClass.read_raster(cfg.spatials.subbsn)
        dist2stream_r = RasterUtilClass.read_raster(
            cfg.spatials.dist2stream_d8)
        # End reading
        for j, bmp_txt in enumerate(bmp_tabs_path):
            bmp_tab_name = bmp_tabs[j]
            data_array = read_data_items_from_txt(bmp_txt)
            field_array = data_array[0]
            data_array = data_array[1:]
            for item in data_array:
                dic = dict()
                for i, field_name in enumerate(field_array):
                    if MathClass.isnumerical(item[i]):
                        v = float(item[i])
                        if v % 1. == 0.:
                            v = int(v)
                        dic[field_name.upper()] = v
                    else:
                        dic[field_name.upper()] = str(item[i]).upper()
                if StringClass.string_in_list(ImportScenario2Mongo._LocalX, list(dic.keys())) and \
                        StringClass.string_in_list(ImportScenario2Mongo._LocalY, list(dic.keys())):
                    subbsn_id = subbasin_r.get_value_by_xy(
                        dic[ImportScenario2Mongo._LocalX.upper()],
                        dic[ImportScenario2Mongo._LocalY.upper()])
                    distance = dist2stream_r.get_value_by_xy(
                        dic[ImportScenario2Mongo._LocalX.upper()],
                        dic[ImportScenario2Mongo._LocalY.upper()])
                    if subbsn_id is not None and distance is not None:
                        dic[ImportScenario2Mongo._SUBBASINID] = int(subbsn_id)
                        dic[ImportScenario2Mongo._DISTDOWN] = float(distance)
                        scenario_db[bmp_tab_name.upper()].find_one_and_replace(
                            dic, dic, upsert=True)
                else:
                    scenario_db[bmp_tab_name.upper()].find_one_and_replace(
                        dic, dic, upsert=True)
        # Write BMP database name into Model workflow database
        c_list = main_db.collection_names()
        if not StringClass.string_in_list(DBTableNames.main_scenario, c_list):
            main_db.create_collection(DBTableNames.main_scenario)

        bmp_info_dic = dict()
        bmp_info_dic[ImportScenario2Mongo._FLD_DB] = cfg.bmp_scenario_db
        main_db[DBTableNames.main_scenario].find_one_and_replace(bmp_info_dic,
                                                                 bmp_info_dic,
                                                                 upsert=True)
        return True
Example #33
    def __init__(
        self,
        bin_dir='',  # type: AnyStr # The directory of SEIMS binary
        model_dir='',  # type: AnyStr # The directory of SEIMS model
        nthread=4,  # type: int # Thread number for OpenMP
        lyrmtd=0,  # type: int # Layering method, can be 0 (UP_DOWN) or 1 (DOWN_UP)
        host='127.0.0.1',  # type: AnyStr # MongoDB host address, default is `localhost`
        port=27017,  # type: int # MongoDB port, default is 27017
        scenario_id=-1,  # type: int # Scenario ID defined in `<model>_Scenario` database
        calibration_id=-1,  # type: int # Calibration ID used for model auto-calibration
        version='OMP',  # type: AnyStr # SEIMS version, can be `MPI` or `OMP` (default)
        nprocess=1,  # type: int # Process number for MPI
        mpi_bin='',  # type: AnyStr # Full path of MPI executable file, e.g., './mpirun`
        hosts_opt='-f',  # type: AnyStr # Option for assigning hosts,
        # e.g., `-f`, `-hostfile`, `-machine`, `-machinefile`
        hostfile='',  # type: AnyStr # File containing host names,
        # or file mapping process numbers to machines
        simu_stime=None,  # type: Optional[datetime, AnyStr] # Start time of simulation
        simu_etime=None,  # type: Optional[datetime, AnyStr] # End time of simulation
        args_dict=None  # type: Dict[AnyStr, Optional[AnyStr, datetime, int]]
    ):
        # type: (...) -> None
        #  Derived from input arguments
        if args_dict is None:  # Preferred to use 'args_dict' if existed.
            args_dict = dict()
        bin_dir = args_dict['bin_dir'] if 'bin_dir' in args_dict else bin_dir
        model_dir = args_dict['model_dir'] if 'model_dir' in args_dict else model_dir
        self.version = args_dict['version'] if 'version' in args_dict else version
        suffix = '.exe' if sysstr == 'Windows' else ''
        if self.version == 'MPI':
            self.seims_exec = '%s/seims_mpi%s' % (bin_dir, suffix)
        else:
            self.seims_exec = '%s/seims_omp%s' % (bin_dir, suffix)
            if not FileClass.is_file_exists(self.seims_exec):
                # If OpenMP is not supported, fall back to `seims`!
                self.seims_exec = '%s/seims%s' % (bin_dir, suffix)
        self.seims_exec = os.path.abspath(self.seims_exec)
        self.model_dir = os.path.abspath(model_dir)

        self.nthread = args_dict['nthread'] if 'nthread' in args_dict else nthread
        self.lyrmtd = args_dict['lyrmtd'] if 'lyrmtd' in args_dict else lyrmtd
        self.host = args_dict['host'] if 'host' in args_dict else host
        self.port = args_dict['port'] if 'port' in args_dict else port
        self.scenario_id = args_dict['scenario_id'] if 'scenario_id' in args_dict else scenario_id
        self.calibration_id = args_dict['calibration_id'] \
            if 'calibration_id' in args_dict else calibration_id
        self.nprocess = args_dict['nprocess'] if 'nprocess' in args_dict else nprocess
        self.mpi_bin = args_dict['mpi_bin'] if 'mpi_bin' in args_dict else mpi_bin
        self.hosts_opt = args_dict['hosts_opt'] if 'hosts_opt' in args_dict else hosts_opt
        self.hostfile = args_dict['hostfile'] if 'hostfile' in args_dict else hostfile
        self.simu_stime = args_dict['simu_stime'] if 'simu_stime' in args_dict else simu_stime
        self.simu_etime = args_dict['simu_etime'] if 'simu_etime' in args_dict else simu_etime
        if is_string(self.simu_stime) and not isinstance(self.simu_stime, datetime):
            self.simu_stime = StringClass.get_datetime(self.simu_stime)
        if is_string(self.simu_etime) and not isinstance(self.simu_etime, datetime):
            self.simu_etime = StringClass.get_datetime(self.simu_etime)

        # Concatenate executable command
        self.cmd = self.Command
        self.run_success = False
        self.output_dir = self.OutputDirectory
        # Read model data from MongoDB
        self.db_name = os.path.split(self.model_dir)[1]
        self.outlet_id = self.OutletID
        self.start_time, self.end_time = self.SimulatedPeriod  # Note: simulation period defined in FILE_IN.
        self.output_items = dict()  # type: Dict[AnyStr, List[AnyStr]]
        # Data that may be used after the model run
        self.timespan = dict()  # type: Dict[AnyStr, Dict[AnyStr, Union[float, Dict[AnyStr, float]]]]
        self.obs_vars = list()  # type: List[AnyStr]  # Observation types at the outlet
        self.obs_value = dict()  # type: Dict[datetime, List[float]]  # Observation values
        self.sim_vars = list()  # type: List[AnyStr]  # Simulation types, part of `obs_vars`
        self.sim_value = dict()  # type: Dict[datetime, List[float]]  # Simulation values
        # The format of sim_obs_dict:
        #         {VarName: {'UTCDATETIME': [t1, t2, ..., tn],
        #                    'Obs': [o1, o2, ..., on],
        #                    'Sim': [s1, s2, ..., sn]},
        #         ...
        #         }
        self.sim_obs_dict = dict()  # type: Dict[AnyStr, Dict[AnyStr, Union[float, List[Union[datetime, float]]]]]
        self.runtime = 0.
        self.runlogs = list()  # type: List[AnyStr]
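
A minimal usage sketch of the constructor above. The enclosing class name is not shown in this snippet; `MainSEIMS` and all paths below are assumptions for illustration. Note that the constructor immediately reads FILE_IN and other model data from MongoDB, so a running database is required. Settings can be passed either as plain keyword arguments or bundled in a single `args_dict`, which takes precedence:

    # Hypothetical sketch: 'MainSEIMS' and the paths are placeholders, not from this snippet.
    args = {'bin_dir': '/path/to/seims/bin', 'model_dir': '/path/to/demo_model',
            'nthread': 4, 'scenario_id': 0,
            'simu_stime': '2012-01-01 00:00:00',  # strings are parsed to datetime
            'simu_etime': '2012-12-31 23:59:59'}
    model = MainSEIMS(args_dict=args)
    # Equivalent to passing the same settings as plain keyword arguments.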
Example #34
File: TauDEM.py Project: alameday/PyGeoC
    def run(function_name, in_files, wp=None, in_params=None, out_files=None, mpi_params=None,
            log_params=None):
        """
        Run TauDEM function.

         - 1. The command will not execute if any input file does not exist.
         - 2. An error will be detected after running the TauDEM command if
              any output file does not exist.

        Args:
            function_name (str): Full path of TauDEM function.
            in_files (dict, required): Dict of pairs of parameter id (string) and file path
                (string or list) for input files, e.g.::

                    {'-z': '/full/path/to/dem.tif'}

            wp (str, optional): Workspace for outputs. If not specified, the directory of the
                first input file in ``in_files`` will be used.
            in_params (dict, optional): Dict of pairs of parameter id (string) and value
                (or None for a flag parameter without a value) for input parameters, e.g.::

                    {'-nc': None}
                    {'-thresh': threshold}
                    {'-m': 'ave s', '-nc': None}

            out_files (dict, optional): Dict of pairs of parameter id (string) and file
                path (string or list) for output files, e.g.::

                    {'-fel': 'filleddem.tif'}
                    {'-maxS': ['harden.tif', 'maxsimi.tif']}

            mpi_params (dict, optional): Dict of pairs of parameter id (string) and value or
                path for MPI setting, e.g.::

                    {'mpipath':'/soft/bin','hostfile':'/soft/bin/cluster.node','n':4}
                    {'mpipath':'/soft/bin', 'n':4}
                    {'n':4}

            log_params (dict, optional): Dict of pairs of parameter id (string) and value or
                path for runtime and log output parameters. e.g.::

                    {'logfile': '/home/user/log.txt',
                     'runtimefile': '/home/user/runtime.txt'}

        Returns:
            True if TauDEM run successfully, otherwise False.
        """
        # Check input files
        if in_files is None:
            TauDEM.error('Input files parameter is required!')
        if not isinstance(in_files, dict):
            TauDEM.error('The input files parameter must be a dict!')
        for (pid, infile) in iteritems(in_files):
            if infile is None:
                continue
            if isinstance(infile, list) or isinstance(infile, tuple):
                for idx, inf in enumerate(infile):
                    if inf is None:
                        continue
                    inf, wp = TauDEM.check_infile_and_wp(inf, wp)
                    in_files[pid][idx] = inf
                continue
            if os.path.exists(infile):
                infile, wp = TauDEM.check_infile_and_wp(infile, wp)
                in_files[pid] = os.path.abspath(infile)
            else:
                # For more flexible input file extensions,
                # e.g., -inputtags 1 <path/to/tag1.tif> 2 <path/to/tag2.tif> ...
                # In such unpredictable circumstances, the existence of the input
                # files cannot be checked here; the caller must check it elsewhere.
                if len(StringClass.split_string(infile, ' ')) > 1:
                    continue
                else:  # the infile should still be an existing file, so check in the workspace
                    if wp is None:
                        TauDEM.error('Workspace should not be None!')
                    infile = wp + os.sep + infile
                    if not os.path.exists(infile):
                        TauDEM.error('Input files parameter %s: %s does not exist!' %
                                     (pid, infile))
                    in_files[pid] = os.path.abspath(infile)
        # Make the workspace directory if it does not exist
        UtilClass.mkdir(wp)
        # Check the log parameter
        log_file = None
        runtime_file = None
        if log_params is not None:
            if not isinstance(log_params, dict):
                TauDEM.error('The log parameter must be a dict!')
            if 'logfile' in log_params and log_params['logfile'] is not None:
                log_file = log_params['logfile']
                # If log_file is just a file name, then save it in the default workspace.
                if os.sep not in log_file:
                    log_file = wp + os.sep + log_file
                    log_file = os.path.abspath(log_file)
            if 'runtimefile' in log_params and log_params['runtimefile'] is not None:
                runtime_file = log_params['runtimefile']
                # If runtime_file is just a file name, then save it in the default workspace.
                if os.sep not in runtime_file:
                    runtime_file = wp + os.sep + runtime_file
                    runtime_file = os.path.abspath(runtime_file)

        # Remove existing out_files to avoid file-IO-related errors
        new_out_files = list()
        if out_files is not None:
            if not isinstance(out_files, dict):
                TauDEM.error('The output files parameter must be a dict!')
            for (pid, out_file) in iteritems(out_files):
                if out_file is None:
                    continue
                if isinstance(out_file, list) or isinstance(out_file, tuple):
                    for idx, outf in enumerate(out_file):
                        if outf is None:
                            continue
                        outf = FileClass.get_file_fullpath(outf, wp)
                        FileClass.remove_files(outf)
                        out_files[pid][idx] = outf
                        new_out_files.append(outf)
                else:
                    out_file = FileClass.get_file_fullpath(out_file, wp)
                    FileClass.remove_files(out_file)
                    out_files[pid] = out_file
                    new_out_files.append(out_file)

        # concatenate command line
        commands = list()
        # MPI header
        if mpi_params is not None:
            if not isinstance(mpi_params, dict):
                TauDEM.error('The MPI settings parameter must be a dict!')
            if 'mpipath' in mpi_params and mpi_params['mpipath'] is not None:
                commands.append(mpi_params['mpipath'] + os.sep + 'mpiexec')
            else:
                commands.append('mpiexec')
            if 'hostfile' in mpi_params and mpi_params['hostfile'] is not None \
                    and not StringClass.string_match(mpi_params['hostfile'], 'none') \
                    and os.path.isfile(mpi_params['hostfile']):
                commands.append('-f')
                commands.append(mpi_params['hostfile'])
            if 'n' in mpi_params and mpi_params['n'] > 1:
                commands.append('-n')
                commands.append(str(mpi_params['n']))
            else:  # If the number of processes is not greater than 1, do not call mpiexec.
                commands = []
        # append TauDEM function name, which can be full path or just one name
        commands.append(function_name)
        # append input files
        for (pid, infile) in iteritems(in_files):
            if infile is None:
                continue
            if pid[0] != '-':
                pid = '-' + pid
            commands.append(pid)
            if isinstance(infile, list) or isinstance(infile, tuple):
                commands.append(' '.join(tmpf for tmpf in infile))
            else:
                commands.append(infile)
        # append input parameters
        if in_params is not None:
            if not isinstance(in_params, dict):
                TauDEM.error('The input parameters must be a dict!')
            for (pid, v) in iteritems(in_params):
                if pid[0] != '-':
                    pid = '-' + pid
                commands.append(pid)
                # allow for a parameter that is a flag without a value
                if v != '' and v is not None:
                    if MathClass.isnumerical(v):
                        commands.append(str(v))
                    else:
                        commands.append(v)
        # append output parameters
        if out_files is not None:
            for (pid, outfile) in iteritems(out_files):
                if outfile is None:
                    continue
                if pid[0] != '-':
                    pid = '-' + pid
                commands.append(pid)
                if isinstance(outfile, list) or isinstance(outfile, tuple):
                    commands.append(' '.join(tmpf for tmpf in outfile))
                else:
                    commands.append(outfile)
        # run command
        runmsg = UtilClass.run_command(commands)
        TauDEM.log(runmsg, log_file)
        TauDEM.output_runtime_to_log(function_name, runmsg, runtime_file)
        # Check out_files, raise RuntimeError if not exist.
        for of in new_out_files:
            if not os.path.exists(of):
                TauDEM.error('%s failed, and the %s was not generated!' % (function_name, of))
                return False
        return True
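
A hedged usage sketch of `TauDEM.run`, based only on the docstring above. `pitremove` is TauDEM's standard pit-filling tool; the paths are placeholders:

    # Fill depressions in a DEM with TauDEM's PitRemove using 4 MPI processes.
    ok = TauDEM.run('pitremove',
                    in_files={'-z': '/data/dem.tif'},
                    wp='/data/taudem_out',
                    out_files={'-fel': 'dem_fel.tif'},
                    mpi_params={'mpipath': '/usr/bin', 'n': 4},
                    log_params={'logfile': 'taudem.log'})
    if not ok:
        raise RuntimeError('PitRemove failed, see taudem.log in the workspace!')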
Example #35
    def read_param_ranges(self):
        """Read param_rng.def file

           name,lower_bound,upper_bound,group,dist
           (group and dist are optional)

            e.g.,
             Param1,0,1[,Group1][,dist1]
             Param2,0,1[,Group2][,dist2]
             Param3,0,1[,Group3][,dist3]

        Returns:
            a dictionary containing:
            - names - the names of the parameters
            - bounds - a list of lists of lower and upper bounds
            - num_vars - a scalar indicating the number of variables
                         (the length of names)
            - groups - a list of group names (strings) for each variable
            - dists - a list of distributions for the problem,
                        None if not specified or all uniform
        """
        # read param_defs.json if it already exists
        if not self.param_defs:
            if FileClass.is_file_exists(self.cfg.outfiles.param_defs_json):
                with open(self.cfg.outfiles.param_defs_json, 'r') as f:
                    self.param_defs = UtilClass.decode_strs_in_dict(
                        json.load(f))
                return
        # read param_range_def file and output to json file
        client = ConnectMongoDB(self.model.host, self.model.port)
        conn = client.get_conn()
        db = conn[self.model.db_name]
        collection = db['PARAMETERS']

        names = list()
        bounds = list()
        groups = list()
        dists = list()
        num_vars = 0
        items = read_data_items_from_txt(self.cfg.param_range_def)
        for item in items:
            if len(item) < 3:
                continue
            # find the parameter name; print a warning if it does not exist
            cursor = collection.find({'NAME': item[0]}, no_cursor_timeout=True)
            if not cursor.count():
                print('WARNING: parameter %s does not exist!' % item[0])
                continue
            num_vars += 1
            names.append(item[0])
            bounds.append([float(item[1]), float(item[2])])
            # If the fourth column does not contain a group name, use
            # the parameter name
            if len(item) >= 4:
                groups.append(item[3])
            else:
                groups.append(item[0])
            if len(item) >= 5:
                dists.append(item[4])
            else:
                dists.append('unif')
        if groups == names:
            groups = None
        elif len(set(groups)) == 1:
            raise ValueError(
                'Only one group defined, results will not be meaningful')

        # Set dists to None if all distributions are uniform,
        # because non-uniform scaling is not needed
        if all([d == 'unif' for d in dists]):
            dists = None

        self.param_defs = {
            'names': names,
            'bounds': bounds,
            'num_vars': num_vars,
            'groups': groups,
            'dists': dists
        }

        # Save as json, which can be loaded by json.load()
        json_data = json.dumps(self.param_defs,
                               indent=4,
                               cls=SpecialJsonEncoder)
        with open(self.cfg.outfiles.param_defs_json, 'w') as f:
            f.write(json_data)
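
For illustration, a hypothetical `param_rng.def` and the `param_defs` dict it would produce under the parsing rules above (the parameter names must exist in the PARAMETERS collection; these three are placeholders):

    # param_rng.def (hypothetical contents)
    K_pet,0.7,1.3
    Runoff_co,0.5,1.2
    gw0,20.,70.

    # Resulting self.param_defs. groups is None because no group column was given
    # (each group defaulted to the parameter name), and dists is None because all
    # distributions defaulted to 'unif'.
    {'names': ['K_pet', 'Runoff_co', 'gw0'],
     'bounds': [[0.7, 1.3], [0.5, 1.2], [20.0, 70.0]],
     'num_vars': 3,
     'groups': None,
     'dists': None}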
Example #36
    def evaluate_models(self):
        """Run SEIMS for objective output variables, and write out.
        """
        if self.output_values is None or len(self.output_values) == 0:
            if FileClass.is_file_exists(self.cfg.outfiles.output_values_txt):
                self.output_values = numpy.loadtxt(
                    self.cfg.outfiles.output_values_txt)
                return
        assert (self.run_count > 0)

        # model configurations
        model_cfg_dict = self.model.ConfigDict

        # Parameters to be evaluated
        input_eva_vars = self.cfg.evaluate_params

        # split tasks if needed
        task_num = self.run_count // 480  # In our cluster, the largest number of workers is 96.
        if task_num == 0:
            split_seqs = [range(self.run_count)]
        else:
            split_seqs = numpy.array_split(numpy.arange(self.run_count),
                                           task_num + 1)
            split_seqs = [a.tolist() for a in split_seqs]

        # Loop partitioned tasks
        run_model_stime = time.time()
        exec_times = list()  # execute time of all model runs
        for idx, cali_seqs in enumerate(split_seqs):
            cur_out_file = '%s/outputs_%d.txt' % (
                self.cfg.outfiles.output_values_dir, idx)
            if FileClass.is_file_exists(cur_out_file):
                continue
            model_cfg_dict_list = list()
            for i, caliid in enumerate(cali_seqs):
                tmpcfg = deepcopy(model_cfg_dict)
                tmpcfg['calibration_id'] = caliid
                model_cfg_dict_list.append(tmpcfg)
            try:  # parallel on multiprocessors or clusters using SCOOP
                from scoop import futures
                output_models = list(
                    futures.map(create_run_model, model_cfg_dict_list))
            except (ImportError, ImportWarning):  # serial
                output_models = list(map(create_run_model,
                                         model_cfg_dict_list))
            # Wait a moment in case of unpredictable file system errors
            time.sleep(0.1)
            # Read observation data from MongoDB only once
            if len(output_models) < 1:  # Although this should not happen, check for safety.
                continue
            obs_vars, obs_data_dict = output_models[0].ReadOutletObservations(
                input_eva_vars)
            if len(obs_vars) < 1:  # Make sure the observation data exists.
                continue
            # Loop the executed models
            eva_values = list()
            for imod, mod_obj in enumerate(output_models):
                # Read executable timespan of each model run
                exec_times.append(mod_obj.GetTimespan())
                # Set observation data since there is no need to read from MongoDB.
                if imod != 0:
                    mod_obj.SetOutletObservations(obs_vars, obs_data_dict)
                # Read simulation
                mod_obj.ReadTimeseriesSimulations(self.cfg.psa_stime,
                                                  self.cfg.psa_etime)
                # Calculate NSE, R2, RMSE, PBIAS, RSR, ln(NSE), NSE1, and NSE3
                self.objnames, obj_values = mod_obj.CalcTimeseriesStatistics(
                    mod_obj.sim_obs_dict)
                eva_values.append(obj_values)
                # delete model output directory for saving storage
                rmtree(mod_obj.output_dir)
            if not isinstance(eva_values, numpy.ndarray):
                eva_values = numpy.array(eva_values)
            numpy.savetxt(cur_out_file, eva_values, delimiter=' ', fmt='%.4f')
            # Save as pickle data for further use. Do not save all models at once, which may be very large!
            cur_model_out_file = '%s/models_%d.pickle' % (
                self.cfg.outfiles.output_values_dir, idx)
            with open(cur_model_out_file, 'wb') as f:
                pickle.dump(output_models, f)
        exec_times = numpy.array(exec_times)
        numpy.savetxt('%s/exec_time_allmodelruns.txt' % self.cfg.psa_outpath,
                      exec_times,
                      delimiter=' ',
                      fmt='%.4f')
        print('Running time of all SEIMS models:\n'
              '\tIO\tCOMP\tSIMU\tRUNTIME\n'
              'MAX\t%s\n'
              'MIN\t%s\n'
              'AVG\t%s\n'
              'SUM\t%s\n' %
              ('\t'.join('%.3f' % v for v in exec_times.max(0)), '\t'.join(
                  '%.3f' % v for v in exec_times.min(0)), '\t'.join(
                      '%.3f' % v for v in exec_times.mean(0)), '\t'.join(
                          '%.3f' % v for v in exec_times.sum(0))))
        print('Running time of executing SEIMS models: %.2fs' %
              (time.time() - run_model_stime))
        # Save objective names as pickle data for further usage
        with open('%s/objnames.pickle' % self.cfg.psa_outpath, 'wb') as f:
            pickle.dump(self.objnames, f)

        # load the first part of output values
        self.output_values = numpy.loadtxt('%s/outputs_0.txt' %
                                           self.cfg.outfiles.output_values_dir)
        if task_num == 0:
            import shutil
            shutil.move(
                '%s/outputs_0.txt' % self.cfg.outfiles.output_values_dir,
                self.cfg.outfiles.output_values_txt)
            shutil.rmtree(self.cfg.outfiles.output_values_dir)
            return
        for idx in range(1, task_num + 1):
            tmp_outputs = numpy.loadtxt(
                '%s/outputs_%d.txt' %
                (self.cfg.outfiles.output_values_dir, idx))
            self.output_values = numpy.concatenate(
                (self.output_values, tmp_outputs))
        numpy.savetxt(self.cfg.outfiles.output_values_txt,
                      self.output_values,
                      delimiter=' ',
                      fmt='%.4f')
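
The try/except around `scoop.futures.map` above is a reusable idiom: the script distributes model runs with SCOOP when launched via `python -m scoop script.py`, and silently degrades to a serial `map` otherwise. A standalone sketch of the same pattern:

    def parallel_map(func, iterable):
        """Map with SCOOP if available, otherwise fall back to the builtin map."""
        try:
            from scoop import futures  # needs `python -m scoop script.py` to distribute
            return list(futures.map(func, iterable))
        except ImportError:  # SCOOP is not installed: run serially
            return list(map(func, iterable))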
Example #37
    def downstream_method_whitebox(stream_raster,
                                   flow_dir_raster,
                                   hillslope_out,
                                   d8alg="taudem",
                                   stream_value_method=-1):
        """Algorithm modified from Whitebox GAT v3.4.0.
           source code: https://github.com/jblindsay/whitebox-geospatial-analysis-tools/
                                blob/master/HydroTools/src/plugins/Hillslopes.java
        Args:
            stream_raster: Stream raster in which cells with a value greater than 0
                              are identified as stream. The input stream IDs are
                              recommended to be sequenced as 1, 2, 3, ...
            flow_dir_raster: D8 flow direction in TauDEM code
            hillslope_out: With sequenced stream IDs, the output hillslopes are numbered:
                                  - Head hillslope: MaxStreamID + (current_id - 1) * 3 + 1
                                  - Right hillslope: MaxStreamID + (current_id - 1) * 3 + 2
                                  - Left hillslope: MaxStreamID + (current_id - 1) * 3 + 3
            d8alg: Currently, "TauDEM", "ArcGIS", and "Whitebox" are supported.
            stream_value_method:  method for assigning stream cell values; depending on
                              this parameter, the output file name is suffixed as follows:
               -1 - all four files below are output.
                0 - keep the stream link code, with the default file name
                1 - set to the value of the right (and head) hillslope, <name>_right.tif
                2 - set to the value of the left (and head) hillslope, <name>_left.tif
                3 - set stream cells to NoData, <name>_nodata.tif
        """
        print('Delineating hillslopes (head, left, and right hillslopes)...')
        streamr = RasterUtilClass.read_raster(stream_raster)
        stream_data = streamr.data
        stream_nodata = streamr.noDataValue
        geotrans = streamr.geotrans
        srs = streamr.srs
        nrows = streamr.nRows
        ncols = streamr.nCols
        datatype = streamr.dataType

        flowd8r = RasterUtilClass.read_raster(flow_dir_raster)
        flowd8_data = flowd8r.data
        flowd8_nodata = flowd8r.noDataValue
        if flowd8r.nRows != nrows or flowd8r.nCols != ncols:
            raise ValueError("The input extent of D8 flow direction is not "
                             "consistent with stream data!")

        # definition of utility functions

        def inflow_stream_number(vrow, vcol, flowmodel="taudem"):
            """
            Count the inflow stream cell number and coordinates of all inflow cells
            Args:
                vrow: row number
                vcol: col number
                flowmodel: D8 flow direction algorithm.
            Returns:
                neighb_stream_cell_num: number of inflow cells that are stream cells
                cell_coors: inflow cell coordinates; its size is equal to or greater
                            than neighb_stream_cell_num
            """
            neighb_stream_cell_num = 0
            cell_coors = []
            for c in range(8):
                newrow = vrow + FlowModelConst.ccw_drow[c]
                newcol = vcol + FlowModelConst.ccw_dcol[c]
                if newrow < 0 or newrow >= nrows or newcol < 0 or newcol >= ncols:
                    continue
                if flowd8_data[newrow][newcol] == FlowModelConst.d8_inflows.get(flowmodel)[c]:
                    cell_coors.append((newrow, newcol))
                    if stream_data[newrow][newcol] > 0 \
                            and stream_data[newrow][newcol] != stream_nodata:
                        neighb_stream_cell_num += 1
            return neighb_stream_cell_num, cell_coors

        def assign_sequenced_stream_ids(c_id, vrow, vcol, flowmodel="taudem"):
            """set sequenced stream IDs"""
            in_strm_num, in_coors = inflow_stream_number(vrow, vcol, flowmodel)
            if in_strm_num == 0:
                # it's a headwater location so start a downstream flowpath
                c_id += 1
                tmp_row = vrow
                tmp_col = vcol
                sequenced_stream_d[tmp_row][tmp_col] = c_id
                searched_flag = True
                while searched_flag:
                    # find the downslope neighbour
                    tmpflowd8 = flowd8_data[tmp_row][tmp_col]
                    if tmpflowd8 < 0 or tmpflowd8 == flowd8_nodata:
                        if stream_data[tmp_row][tmp_col] > 0 \
                                and stream_data[tmp_row][tmp_col] != stream_nodata:
                            # it is a valid stream cell and probably just has no downslope
                            # neighbour (e.g. at the edge of the grid)
                            sequenced_stream_d[tmp_row][tmp_col] = c_id
                        break
                    tmp_row, tmp_col = D8Util.downstream_index(
                        tmpflowd8, tmp_row, tmp_col, flowmodel)
                    if tmp_row < 0 or tmp_row >= nrows or tmp_col < 0 or tmp_col >= ncols:
                        break
                    if stream_data[tmp_row][tmp_col] <= 0:
                        searched_flag = False  # it is not a stream cell
                    else:
                        if sequenced_stream_d[tmp_row][tmp_col] > 0:
                            # run into a larger stream, end the downstream search
                            break
                        # is it a confluence (junction node)?
                        in_strm_num, in_coors = inflow_stream_number(
                            tmp_row, tmp_col, flowmodel)
                        if in_strm_num >= 2:
                            c_id += 1
                        sequenced_stream_d[tmp_row][tmp_col] = c_id
            return c_id

        def assign_hillslope_code_of_neighbors(vrow, vcol, flowmodel="taudem"):
            """set hillslope code for neighbors of current stream cell."""
            stream_coors.append((vrow, vcol))
            in_strm_num, in_coors = inflow_stream_number(vrow, vcol, flowmodel)
            strm_id = stream_data[vrow][vcol]
            # print('Assign hillslope code for stream cell, r: %d, c: %d, ID: %d' % (vrow, vcol,
            #                                                                        int(strm_id)))
            # set hillslope IDs
            hillslp_ids = DelineateHillslope.cal_hs_codes(max_id, strm_id)
            cur_d8_value = flowd8_data[vrow][vcol]
            if in_strm_num == 0:  # it is a first-order stream head
                headstream_coors.append((vrow, vcol))
                for (in_nostrm_row, in_nostrm_col) in in_coors:
                    hillslope_mtx[in_nostrm_row][in_nostrm_col] = hillslp_ids[0]
            else:  # search the 3*3 neighbors by clockwise and counterclockwise separately
                if cur_d8_value <= 0 or cur_d8_value == flowd8_nodata:
                    return
                dirv = int(cur_d8_value)  # direction code
                d_idx = FlowModelConst.d8_dirs.get(flowmodel).index(
                    dirv)  # direction index
                # look to the right side, i.e., clockwise
                d_idx_r = d_idx
                while len(in_coors) > 0:
                    d_idx_r -= 1
                    if d_idx_r > 7:
                        d_idx_r = 0
                    if d_idx_r < 0:
                        d_idx_r = 7
                    tmp_row = vrow + FlowModelConst.ccw_drow[d_idx_r]
                    tmp_col = vcol + FlowModelConst.ccw_dcol[d_idx_r]
                    if (tmp_row, tmp_col) not in in_coors:  # not an inflow to this cell
                        continue
                    tmpstream = stream_data[tmp_row][tmp_col]
                    in_coors.remove((tmp_row, tmp_col))
                    if tmpstream <= 0 or tmpstream == stream_nodata:
                        hillslope_mtx[tmp_row][tmp_col] = hillslp_ids[1]  # right hillslope
                    else:  # encountered another inflow stream
                        break

                # look to the left side, i.e., counterclockwise
                d_idx_l = d_idx
                while len(in_coors) > 0:
                    d_idx_l += 1
                    if d_idx_l > 7:
                        d_idx_l = 0
                    if d_idx_l < 0:
                        d_idx_l = 7
                    tmp_row = vrow + FlowModelConst.ccw_drow[d_idx_l]
                    tmp_col = vcol + FlowModelConst.ccw_dcol[d_idx_l]
                    if (tmp_row, tmp_col) not in in_coors:  # not an inflow to this cell
                        continue
                    tmpstream = stream_data[tmp_row][tmp_col]
                    in_coors.remove((tmp_row, tmp_col))
                    if tmpstream <= 0 or tmpstream == stream_nodata:
                        hillslope_mtx[tmp_row][tmp_col] = hillslp_ids[2]  # left hillslope
                    else:  # encountered another inflow stream
                        break
                # do any inflow cells remain?
                if len(in_coors) > 0:
                    for (tmp_row, tmp_col) in in_coors:
                        tmpstream = stream_data[tmp_row][tmp_col]
                        if tmpstream <= 0 or tmpstream == stream_nodata:
                            hillslope_mtx[tmp_row][tmp_col] = hillslp_ids[0]
                            # add the current cell as a headstream
                            headstream_coors.append((vrow, vcol))

        def output_hillslope(method_id):
            """Output hillslope according to the stream cell value method."""
            for (tmp_row, tmp_col) in stream_coors:
                tmp_hillslp_ids = DelineateHillslope.cal_hs_codes(
                    max_id, stream_data[tmp_row][tmp_col])
                if 0 < method_id < 3:
                    hillslope_mtx[tmp_row][tmp_col] = tmp_hillslp_ids[method_id]
                    # is it a headstream cell?
                    if (tmp_row, tmp_col) in headstream_coors:
                        hillslope_mtx[tmp_row][tmp_col] = tmp_hillslp_ids[0]
                elif method_id == 3:
                    hillslope_mtx[tmp_row][tmp_col] = DEFAULT_NODATA
            # Output to raster file
            hillslope_out_new = hillslope_out
            dirpath = os.path.dirname(hillslope_out_new) + os.path.sep
            corename = FileClass.get_core_name_without_suffix(
                hillslope_out_new)
            if method_id == 1:
                hillslope_out_new = dirpath + corename + '_right.tif'
            elif method_id == 2:
                hillslope_out_new = dirpath + corename + '_left.tif'
            elif method_id == 3:
                hillslope_out_new = dirpath + corename + '_nodata.tif'
            RasterUtilClass.write_gtiff_file(hillslope_out_new, nrows, ncols,
                                             hillslope_mtx, geotrans, srs,
                                             DEFAULT_NODATA, datatype)

        # 1. assign a unique id to each link in the stream network if needed
        assign_stream_id = False
        tmp = numpy.where((stream_data > 0) & (stream_data != stream_nodata),
                          stream_data, numpy.nan)
        max_id = int(numpy.nanmax(tmp))  # i.e., stream link number
        min_id = int(numpy.nanmin(tmp))
        for i in range(min_id, max_id + 1):
            if i not in tmp:
                assign_stream_id = True
                break
        if max_id == min_id:
            assign_stream_id = True
        current_id = 0
        if assign_stream_id:
            # calculate and output sequenced stream raster
            sequenced_stream_d = numpy.ones((nrows, ncols)) * DEFAULT_NODATA
            for row in range(nrows):
                for col in range(ncols):
                    # if the cell is not a stream, or has been assigned an ID
                    if stream_data[row][col] <= 0 or stream_data[row][col] == stream_nodata \
                            or sequenced_stream_d[row][col] > 0:
                        continue
                    current_id = assign_sequenced_stream_ids(
                        current_id, row, col, d8alg)
            stream_data = numpy.copy(sequenced_stream_d)
            stream_nodata = DEFAULT_NODATA
            stream_core = FileClass.get_core_name_without_suffix(stream_raster)
            stream_seq_file = os.path.dirname(
                stream_raster) + os.path.sep + stream_core + '_seq.tif'
            RasterUtilClass.write_gtiff_file(stream_seq_file, nrows, ncols,
                                             sequenced_stream_d, geotrans, srs,
                                             DEFAULT_NODATA, datatype)
            max_id = current_id
        # 2. assign hillslope code according to the 3*3 neighbors of stream cells
        hillslope_mtx = numpy.copy(stream_data)
        hillslope_mtx[stream_data == stream_nodata] = DEFAULT_NODATA
        headstream_coors = []  # headstream cells
        stream_coors = []  # all stream cells, including headstream cells
        for row in range(nrows):
            for col in range(ncols):
                # if not a stream cell, or hillslope code has been assigned
                if stream_data[row][col] <= 0 or stream_data[row][col] == stream_nodata \
                        or hillslope_mtx[row][col] < 0:
                    continue
                assign_hillslope_code_of_neighbors(row, col, d8alg)

        # 3. From each cell without a hillslope code, search downstream until an assigned cell is found
        for row in range(nrows):
            for col in range(ncols):
                if hillslope_mtx[row][col] > 0 or flowd8_data[row][col] == flowd8_nodata:
                    continue
                flag = False
                tmprow = row
                tmpcol = col
                tmpcoors = [(row, col)]
                hillslp_id = DEFAULT_NODATA
                while not flag:
                    # find its downslope neighbour
                    curflowdir = flowd8_data[tmprow][tmpcol]
                    if curflowdir <= 0 or curflowdir == flowd8_nodata:
                        break
                    curflowdir = int(curflowdir)
                    tmprow, tmpcol = D8Util.downstream_index(
                        curflowdir, tmprow, tmpcol, d8alg)
                    if tmprow < 0 or tmprow >= nrows or tmpcol < 0 or tmpcol >= ncols:
                        break
                    # if the new cell already has a hillslope value, use that
                    if hillslope_mtx[tmprow][tmpcol] > 0:
                        hillslp_id = hillslope_mtx[tmprow][tmpcol]
                        flag = True
                    if not flag:
                        tmpcoors.append((tmprow, tmpcol))
                # set the source cells
                for (crow, ccol) in tmpcoors:
                    hillslope_mtx[crow][ccol] = hillslp_id

        # 4. reassign stream cell's value according to stream_value_method, and output
        if stream_value_method < 0:  # output
            output_hillslope(0)
            output_hillslope(1)
            output_hillslope(2)
            output_hillslope(3)
        else:
            output_hillslope(stream_value_method)
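
A usage sketch, assuming this function is exposed as a static method of the `DelineateHillslope` class referenced in its body; the paths are placeholders:

    # Delineate head/right/left hillslopes from a sequenced stream link raster
    # and a TauDEM D8 flow direction raster; -1 writes all four output variants.
    DelineateHillslope.downstream_method_whitebox('/data/stream_link.tif',
                                                  '/data/flow_dir_d8.tif',
                                                  '/data/hillslope.tif',
                                                  d8alg='taudem',
                                                  stream_value_method=-1)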
Example #38
    def serialize_streamnet(streamnet_file, output_reach_file):
        """Eliminate reach with zero length and return the reach ID map.
        Args:
            streamnet_file: original stream net ESRI shapefile
            output_reach_file: serialized stream net, ESRI shapefile

        Returns:
            id pairs {origin: newly assigned}
        """
        FileClass.copy_files(streamnet_file, output_reach_file)
        ds_reach = ogr_Open(output_reach_file, update=True)
        layer_reach = ds_reach.GetLayer(0)
        layer_def = layer_reach.GetLayerDefn()
        i_link = layer_def.GetFieldIndex(FLD_LINKNO)
        i_link_downslope = layer_def.GetFieldIndex(FLD_DSLINKNO)
        i_len = layer_def.GetFieldIndex(REACH_LENGTH)

        old_id_list = []
        # There are some reaches with zero length; this program removes them.
        # output_dic stores the downstream reach IDs of these zero-length reaches
        output_dic = {}
        ft = layer_reach.GetNextFeature()
        while ft is not None:
            link_id = ft.GetFieldAsInteger(i_link)
            reach_len = ft.GetFieldAsDouble(i_len)
            if link_id not in old_id_list:
                if reach_len < DELTA:
                    downstream_id = ft.GetFieldAsInteger(i_link_downslope)
                    output_dic[link_id] = downstream_id
                else:
                    old_id_list.append(link_id)

            ft = layer_reach.GetNextFeature()
        old_id_list.sort()

        id_map = {}
        for i, old_id in enumerate(old_id_list):
            id_map[old_id] = i + 1
        # print(id_map)
        # change old ID to new ID
        layer_reach.ResetReading()
        ft = layer_reach.GetNextFeature()
        while ft is not None:
            link_id = ft.GetFieldAsInteger(i_link)
            if link_id not in id_map:
                layer_reach.DeleteFeature(ft.GetFID())
                ft = layer_reach.GetNextFeature()
                continue

            ds_id = ft.GetFieldAsInteger(i_link_downslope)
            # Follow the downstream ID through zero-length reaches
            # (applied twice to skip chains of up to two such reaches)
            ds_id = output_dic.get(ds_id, ds_id)
            ds_id = output_dic.get(ds_id, ds_id)

            ft.SetField(FLD_LINKNO, id_map[link_id])
            if ds_id in id_map:
                ft.SetField(FLD_DSLINKNO, id_map[ds_id])
            else:
                # print(ds_id)
                ft.SetField(FLD_DSLINKNO, -1)
            layer_reach.SetFeature(ft)
            ft = layer_reach.GetNextFeature()
        ds_reach.ExecuteSQL("REPACK reach")
        layer_reach.SyncToDisk()
        ds_reach.Destroy()
        del ds_reach
        return id_map
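
A brief usage sketch with placeholder paths; the returned dict maps each original LINKNO to its newly assigned sequential ID:

    # Rebuild sequential reach IDs (1..N) after dropping zero-length reaches.
    id_map = serialize_streamnet('/data/streamnet.shp', '/data/reach_serialized.shp')
    print(id_map)  # e.g., {3: 1, 5: 2, 8: 3, ...}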
Example #39
    def __init__(self, cfg_parser, bin_dir=None, proc_num=-1, rawdem=None, root_dir=None):
        """
        Initialize an AutoFuzSlpPosConfig object
        Args:
            cfg_parser: ConfigParser object
            bin_dir: Executable binaries path
            proc_num: thread (or process) number used for MPI
            rawdem: DEM of study area
            root_dir: workspace path
        """
        # Part I Initialize attributes
        # 1.1. input parameters
        self.cf = cfg_parser
        self.bin_dir = bin_dir
        self.proc = proc_num
        self.root_dir = root_dir

        # 1.2. Required inputs
        self.dem = rawdem

        # 1.3. Executable Flags (Set default flags first)
        self.flag_preprocess = True
        self.flag_selecttyploc = True
        self.flag_auto_typlocparams = True
        self.flag_fuzzyinference = True
        self.flag_auto_inferenceparams = True
        self.flag_log = True
        # 1.4. Optional inputs
        self.mpi_dir = None
        self.hostfile = None
        self.outlet = None
        self.valley = None
        self.ridge = None
        self.regional_attr = None

        # 1.5. Optional DTA related parameters
        self.flow_model = 1
        self.rpi_method = 1
        self.dist_exp = 8
        self.max_move_dist = 50
        self.numthresh = 20
        self.d8_stream_thresh = 0
        self.d8_down_method = 'Surface'
        self.d8_stream_tag = 1
        self.d8_up_method = 'Surface'
        self.dinf_stream_thresh = 0
        self.dinf_down_stat = 'Average'
        self.dinf_down_method = 'Surface'
        self.dinf_dist_down_wg = None
        self.propthresh = 0.0
        self.dinf_up_stat = 'Average'
        self.dinf_up_method = 'Surface'

        # 1.6. Slope position types, tags, typical location extract value ranges, and inference
        #      parameters.
        self.slppostype = list()  # From top to bottom on hillslope.
        self.slppostag = list()  # The same sequence with self.slppostype
        self.selectedtopo = dict()  # Topographic attributes used for AutoFuzSlpPos
        self.extractrange = dict()
        self.param4typloc = dict()
        self.infshape = dict()
        self.inferparam = dict()

        # 1.7. derived attributes
        self.ws = None
        self.log = None
        self.topoparam = None
        self.slpposresult = None
        self.pretaudem = None
        self.singleslpposconf = dict()

        # Part II Set default settings, if cfg_parser is None, the program still can be executed.

        # default slope position settings
        self.slppostype = ['rdg', 'shd', 'bks', 'fts', 'vly']
        self.slppostag = [1, 2, 4, 8, 16]
        self.selectedtopolist = ['rpi', 'profc', 'slp', 'elev']
        self.selectedtopo = dict()
        self.extractrange = {'rdg': {'rpi': [0.99, 1.0]},
                             'shd': {'rpi': [0.9, 0.95]},
                             'bks': {'rpi': [0.5, 0.6]},
                             'fts': {'rpi': [0.15, 0.2]},
                             'vly': {'rpi': [0., 0.1]}}

        self._DEFAULT_PARAM_TYPLOC = [10, 0.1, 0.3, 1, 0.1, 1, 50, 4.0]
        for slppos in self.slppostype:
            self.param4typloc[slppos] = self._DEFAULT_PARAM_TYPLOC[:]

        self.infshape = {'rdg': {'rpi': 'S', 'profc': 'S', 'slp': 'Z', 'elev': 'SN'},
                         'shd': {'rpi': 'B', 'profc': 'S', 'slp': 'B', 'elev': 'N'},
                         'bks': {'rpi': 'B', 'profc': 'B', 'slp': 'S', 'elev': 'N'},
                         'fts': {'rpi': 'B', 'profc': 'ZB', 'slp': 'ZB', 'elev': 'N'},
                         'vly': {'rpi': 'Z', 'profc': 'Z', 'slp': 'Z', 'elev': 'N'}}
        if self.cf is None:
            if self.dem is not None and self.bin_dir is not None:
                if self.root_dir is None:
                    self.root_dir = os.path.dirname(self.dem)
                if self.mpi_dir is None:
                    mpipath = FileClass.get_executable_fullpath('mpiexec')
                    self.mpi_dir = os.path.dirname(mpipath)
                if self.mpi_dir is None:
                    raise RuntimeError('Cannot find mpiexec! Please make sure it is installed.')
            else:
                raise RuntimeError('You MUST provide either an *.ini configuration file, '
                                   'or both rawdem and bin_dir!')
        if self.root_dir is not None:
            self.ws = CreateWorkspace(self.root_dir)
            self.log = LogNames(self.ws.log_dir)
            self.topoparam = TopoAttrNames(self.ws)
            self.slpposresult = FuzSlpPosFiles(self.ws)
            self.pretaudem = PreProcessAttrNames(self.ws.pre_dir, self.flow_model)
            for attr in self.selectedtopolist:
                self.selectedtopo[attr] = self.topoparam.get_attr_file(attr)
            for slppos in self.slppostype:
                self.singleslpposconf[slppos] = SingleSlpPosFiles(self.ws, slppos)

        # Part III Parse the *.ini configuration file if it exists.
        #          Be careful: bin_dir, root_dir, proc_num, and rawdem related attributes
        #          that have already been set MUST NOT be changed in the following procedures.
        if self.cf is not None:
            # Parse and check validation of all available inputs
            # define the section names in the *.ini configuration file
            _require = 'REQUIRED'
            _flag = 'EXECUTABLE_FLAGS'
            _optdta = 'OPTIONAL_DTA'
            _opt = 'OPTIONAL'
            _opttyploc = 'OPTIONAL_TYPLOC'
            _optfuzinf = 'OPTIONAL_FUZINF'
            self.read_required_section(_require)
            self.read_flag_section(_flag)
            self.read_optionaldta_section(_optdta)
            self.read_optional_section(_opt)
            self.read_optiontyploc_section(_opttyploc)
            self.read_optionfuzinf_section(_optfuzinf)
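
Per the fallback branch above, the configuration can also be built without an *.ini file as long as `rawdem` and `bin_dir` are given (mpiexec must be discoverable on the system PATH). A sketch with placeholder paths:

    # No ConfigParser: defaults apply, and root_dir falls back to the DEM's folder.
    cfg = AutoFuzSlpPosConfig(None,
                              bin_dir='/opt/autofuzslppos/bin',
                              proc_num=4,
                              rawdem='/data/dem.tif')
    print(cfg.slppostype)  # ['rdg', 'shd', 'bks', 'fts', 'vly']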
Example #40
    def __init__(self,
                 bin_dir='',
                 model_dir='',
                 nthread=4,
                 lyrmtd=0,
                 host='127.0.0.1',
                 port=27017,
                 scenario_id=-1,
                 calibration_id=-1,
                 version='OMP',
                 nprocess=1,
                 mpi_bin='',
                 hosts_opt='-f',
                 hostfile='',
                 **kwargs):  # Allow any other keyword arguments
        #  Derived from input arguments
        args_dict = dict()
        if 'args_dict' in kwargs:  # Prefer 'args_dict' if provided.
            args_dict = kwargs['args_dict']
        bin_dir = args_dict.get('bin_dir', bin_dir)
        model_dir = args_dict.get('model_dir', model_dir)
        self.version = args_dict.get('version', version)
        suffix = '.exe' if sysstr == 'Windows' else ''
        if self.version == 'MPI':
            self.seims_exec = bin_dir + os.path.sep + 'seims_mpi' + suffix
        else:
            self.seims_exec = bin_dir + os.path.sep + 'seims_omp' + suffix
            if not FileClass.is_file_exists(self.seims_exec):
                # If the OpenMP version is unavailable, fall back to `seims`
                self.seims_exec = bin_dir + os.path.sep + 'seims' + suffix
        self.seims_exec = os.path.abspath(self.seims_exec)
        self.model_dir = os.path.abspath(model_dir)

        self.nthread = args_dict.get('nthread', nthread)
        self.lyrmtd = args_dict.get('lyrmtd', lyrmtd)
        self.host = args_dict.get('host', host)
        self.port = args_dict.get('port', port)
        self.scenario_id = args_dict.get('scenario_id', scenario_id)
        self.calibration_id = args_dict.get('calibration_id', calibration_id)
        self.nprocess = args_dict.get('nprocess', nprocess)
        self.mpi_bin = args_dict.get('mpi_bin', mpi_bin)
        self.hosts_opt = args_dict.get('hosts_opt', hosts_opt)
        self.hostfile = args_dict.get('hostfile', hostfile)

        # Concatenate executable command
        self.cmd = self.Command
        self.run_success = False
        self.output_dir = self.OutputDirectory
        # Read model data from MongoDB
        self.db_name = os.path.split(self.model_dir)[1]
        self.outlet_id = self.OutletID
        self.start_time, self.end_time = self.SimulatedPeriod
        # Data that may be used after the model run
        self.timespan = dict()
        self.obs_vars = list()  # Observation types at the outlet
        self.obs_value = dict()  # Observation values: key is DATETIME, value is a list matching obs_vars
        self.sim_vars = list()  # Simulation types at the outlet, a subset of obs_vars
        self.sim_value = dict()  # Simulation values, same format as obs_value
        # The format of sim_obs_dict:
        #         {VarName: {'UTCDATETIME': [t1, t2, ..., tn],
        #                    'Obs': [o1, o2, ..., on],
        #                    'Sim': [s1, s2, ..., sn]},
        #         ...
        #         }
        self.sim_obs_dict = dict()
Example #41
    def __init__(self, cf):
        # Default arguments
        self.host = '127.0.0.1'  # localhost by default
        self.port = 27017
        self.bin_dir = ''
        self.model_dir = ''
        self.db_name = ''
        self.version = 'OMP'
        self.mpi_bin = None
        self.hosts_opt = None
        self.hostfile = None
        self.nprocess = 1
        self.nthread = 1
        self.lyrmtd = 1
        self.scenario_id = 0
        self.calibration_id = -1
        self.config_dict = dict()

        if 'SEIMS_Model' not in cf.sections():
            raise ValueError('[SEIMS_Model] section MUST exist in the *.ini file.')

        self.host = cf.get('SEIMS_Model', 'hostname')
        self.port = cf.getint('SEIMS_Model', 'port')
        if not StringClass.is_valid_ip_addr(self.host):
            raise ValueError('HOSTNAME defined in [SEIMS_Model] is not a valid IP address!')

        self.bin_dir = cf.get('SEIMS_Model', 'bin_dir')
        self.model_dir = cf.get('SEIMS_Model', 'model_dir')
        if not (FileClass.is_dir_exists(self.model_dir)
                and FileClass.is_dir_exists(self.bin_dir)):
            raise IOError('Please check the directories defined in [SEIMS_Model]: '
                          'BIN_DIR and MODEL_DIR are required!')
        self.db_name = os.path.split(self.model_dir)[1]

        if cf.has_option('SEIMS_Model', 'version'):
            self.version = cf.get('SEIMS_Model', 'version')
        if cf.has_option('SEIMS_Model',
                         'mpi_bin'):  # full path of the executable MPI program
            self.mpi_bin = cf.get('SEIMS_Model', 'mpi_bin')
        if cf.has_option('SEIMS_Model', 'hostopt'):
            self.hosts_opt = cf.get('SEIMS_Model', 'hostopt')
        if cf.has_option('SEIMS_Model', 'hostfile'):
            self.hostfile = cf.get('SEIMS_Model', 'hostfile')
        if cf.has_option('SEIMS_Model', 'processnum'):
            self.nprocess = cf.getint('SEIMS_Model', 'processnum')
        if cf.has_option('SEIMS_Model', 'threadsnum'):
            self.nthread = cf.getint('SEIMS_Model', 'threadsnum')
        if cf.has_option('SEIMS_Model', 'layeringmethod'):
            self.lyrmtd = cf.getint('SEIMS_Model', 'layeringmethod')
        if cf.has_option('SEIMS_Model', 'scenarioid'):
            self.scenario_id = cf.getint('SEIMS_Model', 'scenarioid')
        if cf.has_option('SEIMS_Model', 'calibrationid'):
            self.calibration_id = cf.getint('SEIMS_Model', 'calibrationid')

        if not (cf.has_option('SEIMS_Model', 'sim_time_start')
                and cf.has_option('SEIMS_Model', 'sim_time_end')):
            raise ValueError(
                "Start and end time MUST be specified in [SEIMS_Model].")

        try:
            # UTCTIME
            tstart = cf.get('SEIMS_Model', 'sim_time_start')
            tend = cf.get('SEIMS_Model', 'sim_time_end')
            self.time_start = StringClass.get_datetime(tstart)
            self.time_end = StringClass.get_datetime(tend)
        except ValueError:
            raise ValueError('The time format MUST be "YYYY-MM-DD HH:MM:SS".')
        if self.time_start >= self.time_end:
            raise ValueError("Wrong time settings in [SEIMS_Model]!")
        # Running time counted by time.time() of Python, in case GetTimespan() fails
        self.runtime = 0.
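
Collecting the option names read above into a hypothetical *.ini fragment (values are placeholders; hostname, port, bin_dir, model_dir, sim_time_start, and sim_time_end are mandatory, the rest are optional):

    [SEIMS_Model]
    hostname = 127.0.0.1
    port = 27017
    bin_dir = /path/to/seims/bin
    model_dir = /path/to/demo_model
    version = OMP
    threadsnum = 4
    layeringmethod = 1
    scenarioid = 0
    calibrationid = -1
    sim_time_start = 2012-01-01 00:00:00
    sim_time_end = 2012-12-31 23:59:59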
Example #42
    def __init__(self, cf):
        """Initialization."""
        # 1. Directories
        self.base_dir = None
        self.clim_dir = None
        self.spatial_dir = None
        self.observe_dir = None
        self.scenario_dir = None
        self.model_dir = None
        self.txt_db_dir = None
        self.preproc_script_dir = None
        self.seims_bin = None
        self.mpi_bin = None
        self.workspace = None
        # 1.1. Directory-determined flags
        self.use_observed = True
        self.use_scenario = True
        # 2. MongoDB configuration and database, collation, GridFS names
        self.hostname = '127.0.0.1'  # localhost by default
        self.port = 27017
        self.climate_db = ''
        self.bmp_scenario_db = ''
        self.spatial_db = ''
        # 3. Switch for building SEIMS. These switches should be removed! By lj.
        # self.gen_cn = True
        # self.gen_runoff_coef = True
        # self.gen_crop = True
        # self.gen_iuh = True
        # 4. Climate inputs
        self.hydro_climate_vars = None
        self.prec_sites = None
        self.prec_data = None
        self.Meteo_sites = None
        self.Meteo_data = None
        self.thiessen_field = 'ID'
        # 5. Spatial inputs
        self.prec_sites_thiessen = None
        self.meteo_sites_thiessen = None
        self.dem = None
        self.outlet_file = None
        self.landuse = None
        self.landcover_init_param = None
        self.soil = None
        self.soil_property = None
        self.fields_partition = False
        self.fields_partition_thresh = list()
        self.additional_rs = dict()
        # 6. Option parameters
        self.d8acc_threshold = 0
        self.np = 4
        self.d8down_method = 's'
        self.dorm_hr = -1.
        self.temp_base = 0.
        self.imper_perc_in_urban = 0.
        self.default_landuse = -1
        self.default_soil = -1
        # 1. Directories
        if 'PATH' in cf.sections():
            self.base_dir = cf.get('PATH', 'base_data_dir')
            self.clim_dir = cf.get('PATH', 'climate_data_dir')
            self.spatial_dir = cf.get('PATH', 'spatial_data_dir')
            self.observe_dir = cf.get('PATH', 'measurement_data_dir')
            self.scenario_dir = cf.get('PATH', 'bmp_data_dir')
            self.model_dir = cf.get('PATH', 'model_dir')
            self.txt_db_dir = cf.get('PATH', 'txt_db_dir')
            self.preproc_script_dir = cf.get('PATH', 'preproc_script_dir')
            self.seims_bin = cf.get('PATH', 'cpp_program_dir')
            self.mpi_bin = cf.get('PATH', 'mpiexec_dir')
            self.workspace = cf.get('PATH', 'working_dir')
        else:
            raise ValueError('[PATH] section MUST exist in the *.ini file.')
        if not (FileClass.is_dir_exists(self.base_dir)
                and FileClass.is_dir_exists(self.model_dir)
                and FileClass.is_dir_exists(self.txt_db_dir)
                and FileClass.is_dir_exists(self.preproc_script_dir)
                and FileClass.is_dir_exists(self.seims_bin)):
            raise IOError(
                'Please check the directories defined in [PATH]: '
                'BASE_DATA_DIR, MODEL_DIR, TXT_DB_DIR, PREPROC_SCRIPT_DIR, '
                'and CPP_PROGRAM_DIR are required!')
        if not FileClass.is_dir_exists(self.mpi_bin):
            self.mpi_bin = None
        if not FileClass.is_dir_exists(self.workspace):
            try:  # first try to make dirs
                UtilClass.mkdir(self.workspace)
                # os.mkdir(self.workspace)
            except OSError as exc:
                self.workspace = self.model_dir + os.path.sep + 'preprocess_output'
                print('WARNING: Making WORKING_DIR failed: %s. '
                      'Use the default: %s' % (str(exc), self.workspace))
                if not os.path.exists(self.workspace):
                    UtilClass.mkdir(self.workspace)

        self.dirs = DirNameUtils(self.workspace)
        self.logs = LogNameUtils(self.dirs.log)
        self.vecs = VectorNameUtils(self.dirs.geoshp)
        self.taudems = TauDEMFilesUtils(self.dirs.taudem)
        self.spatials = SpatialNamesUtils(self.dirs.geodata2db)
        self.modelcfgs = ModelCfgUtils(self.model_dir)
        self.paramcfgs = ModelParamDataUtils(self.preproc_script_dir +
                                             os.path.sep + 'database')

        if not FileClass.is_dir_exists(self.clim_dir):
            print('The CLIMATE_DATA_DIR does not exist, '
                  'trying the default folder name "climate".')
            self.clim_dir = self.base_dir + os.path.sep + 'climate'
            if not FileClass.is_dir_exists(self.clim_dir):
                raise IOError('A directory named "climate" MUST be located in base_dir!')

        if not FileClass.is_dir_exists(self.spatial_dir):
            print('The SPATIAL_DATA_DIR does not exist, '
                  'trying the default folder name "spatial".')
            self.spatial_dir = self.base_dir + os.path.sep + 'spatial'
            if not FileClass.is_dir_exists(self.spatial_dir):
                raise IOError('A directory named "spatial" MUST be located in base_dir!')

        if not FileClass.is_dir_exists(self.observe_dir):
            self.observe_dir = None
            self.use_observed = False

        if not FileClass.is_dir_exists(self.scenario_dir):
            self.scenario_dir = None
            self.use_scenario = False

        # 2. MongoDB related
        if 'MONGODB' in cf.sections():
            self.hostname = cf.get('MONGODB', 'hostname')
            self.port = cf.getint('MONGODB', 'port')
            self.climate_db = cf.get('MONGODB', 'climatedbname')
            self.bmp_scenario_db = cf.get('MONGODB', 'bmpscenariodbname')
            self.spatial_db = cf.get('MONGODB', 'spatialdbname')
        else:
            raise ValueError('[MONGODB] section MUST exist in the *.ini file.')
        if not StringClass.is_valid_ip_addr(self.hostname):
            raise ValueError('Invalid HOSTNAME defined in [MONGODB]!')

        # 3. Model related switch. The SWITCH section should be removed! By lj.
        # by default, OpenMP version and daily (longterm) mode will be built
        # if 'SWITCH' in cf.sections():
        #     self.gen_cn = cf.getboolean('SWITCH', 'gencn')
        #     self.gen_runoff_coef = cf.getboolean('SWITCH', 'genrunoffcoef')
        #     self.gen_crop = cf.getboolean('SWITCH', 'gencrop')
        #
        # if self.storm_mode:
        #     self.gen_iuh = False
        #     self.climate_db = ModelNameUtils.standardize_climate_dbname(self.climate_db)

        # 4. Climate Input
        if 'CLIMATE' in cf.sections():
            self.hydro_climate_vars = self.clim_dir + os.path.sep + cf.get(
                'CLIMATE', 'hydroclimatevarfile')
            self.prec_sites = self.clim_dir + os.path.sep + cf.get(
                'CLIMATE', 'precsitefile')
            self.prec_data = self.clim_dir + os.path.sep + cf.get(
                'CLIMATE', 'precdatafile')
            self.Meteo_sites = self.clim_dir + os.path.sep + cf.get(
                'CLIMATE', 'meteositefile')
            self.Meteo_data = self.clim_dir + os.path.sep + cf.get(
                'CLIMATE', 'meteodatafile')
            self.thiessen_field = cf.get('CLIMATE', 'thiessenidfield')
        else:
            raise ValueError(
                'Climate input file names MUST be provided in [CLIMATE]!')

        # 5. Spatial Input
        if 'SPATIAL' in cf.sections():
            self.prec_sites_thiessen = self.spatial_dir + os.path.sep + cf.get(
                'SPATIAL', 'precsitesthiessen')
            self.meteo_sites_thiessen = self.spatial_dir + os.path.sep + cf.get(
                'SPATIAL', 'meteositesthiessen')
            self.dem = self.spatial_dir + os.path.sep + cf.get(
                'SPATIAL', 'dem')
            self.outlet_file = self.spatial_dir + os.path.sep + cf.get(
                'SPATIAL', 'outlet_file')
            if not os.path.exists(self.outlet_file):
                self.outlet_file = None
            self.landuse = self.spatial_dir + os.path.sep + cf.get(
                'SPATIAL', 'landusefile')
            self.landcover_init_param = self.txt_db_dir + os.path.sep + cf.get(
                'SPATIAL', 'landcoverinitfile')
            self.soil = self.spatial_dir + os.path.sep + cf.get(
                'SPATIAL', 'soilseqnfile')
            self.soil_property = self.txt_db_dir + os.path.sep + cf.get(
                'SPATIAL', 'soilseqntext')
            if cf.has_option('SPATIAL', 'additionalfile'):
                additional_dict_str = cf.get('SPATIAL', 'additionalfile')
                tmpdict = json.loads(additional_dict_str)
                tmpdict = {
                    str(k): (str(v) if isinstance(v, str) else v)
                    for k, v in list(tmpdict.items())
                }
                for k, v in list(tmpdict.items()):
                    # Existence check has been moved to mask_origin_delineated_data()
                    #  in sp_delineation.py
                    self.additional_rs[k] = v
            # Field partition
            if cf.has_option('SPATIAL', 'field_partition_thresh'):
                ths = cf.get('SPATIAL', 'field_partition_thresh')
                thsv = StringClass.extract_numeric_values_from_string(ths)
                if thsv is not None:
                    self.fields_partition_thresh = [int(v) for v in thsv]
                    self.fields_partition = True
        else:
            raise ValueError(
                'Spatial input file names MUST be provided in [SPATIAL]!')

        # 6. Option parameters
        if 'OPTIONAL_PARAMETERS' in cf.sections():
            self.d8acc_threshold = cf.getfloat('OPTIONAL_PARAMETERS',
                                               'd8accthreshold')
            self.np = cf.getint('OPTIONAL_PARAMETERS', 'np')
            self.d8down_method = cf.get('OPTIONAL_PARAMETERS', 'd8downmethod')
            if StringClass.string_match(self.d8down_method, 'surface'):
                self.d8down_method = 's'
            elif StringClass.string_match(self.d8down_method, 'horizontal'):
                self.d8down_method = 'h'
            elif StringClass.string_match(self.d8down_method, 'pythagoras'):
                self.d8down_method = 'p'
            elif StringClass.string_match(self.d8down_method, 'vertical'):
                self.d8down_method = 'v'
            else:
                self.d8down_method = self.d8down_method.lower()
                if self.d8down_method not in ['s', 'h', 'p', 'v']:
                    self.d8down_method = 'h'
            self.dorm_hr = cf.getfloat('OPTIONAL_PARAMETERS', 'dorm_hr')
            self.temp_base = cf.getfloat('OPTIONAL_PARAMETERS', 't_base')
            self.imper_perc_in_urban = cf.getfloat(
                'OPTIONAL_PARAMETERS', 'imperviouspercinurbancell')
            self.default_landuse = cf.getint('OPTIONAL_PARAMETERS',
                                             'defaultlanduse')
            self.default_soil = cf.getint('OPTIONAL_PARAMETERS', 'defaultsoil')
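
To make the expected configuration concrete, below is a minimal, hypothetical *.ini sketch covering some of the sections this parser reads. The option names follow the cf.get(...) calls above; every path and value is an illustrative assumption, not a shipped default, and sections such as [SPATIAL] and [OPTIONAL_PARAMETERS] would be filled in analogously.

[PATH]
; directories validated via FileClass.is_dir_exists() above;
; the base/climate/spatial/observation dir options are read earlier
; and are not shown in this excerpt, so their names are assumed
base_data_dir = /data/demo
model_dir = /data/demo/model
txt_db_dir = /data/demo/database
preproc_script_dir = /seims/preprocess
cpp_program_dir = /seims/bin
mpiexec_dir = /usr/bin
working_dir = /data/demo/workspace
bmp_data_dir = /data/demo/scenario

[MONGODB]
hostname = 127.0.0.1
port = 27017
climatedbname = demo_climate
spatialdbname = demo_spatial
bmpscenariodbname = demo_bmp

[CLIMATE]
hydroclimatevarfile = Variables.csv
precsitefile = Sites_P.csv
precdatafile = precipitation_daily.csv
meteositefile = Sites_M.csv
meteodatafile = meteorology_daily.csv
thiessenidfield = ID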
Example #43
    def __init__(self, cf):
        """Initialization."""
        # 1. NSGA-II related parameters
        self.nsga2_ngens = 1
        self.nsga2_npop = 4
        self.nsga2_rcross = 0.75
        self.nsga2_pmut = 0.05
        self.nsga2_rmut = 0.1
        self.nsga2_rsel = 0.8
        if 'NSGA2' in cf.sections():
            self.nsga2_ngens = cf.getint('NSGA2', 'generationsnum')
            self.nsga2_npop = cf.getint('NSGA2', 'populationsize')
            self.nsga2_rcross = cf.getfloat('NSGA2', 'crossoverrate')
            self.nsga2_pmut = cf.getfloat('NSGA2', 'maxmutateperc')
            self.nsga2_rmut = cf.getfloat('NSGA2', 'mutaterate')
            self.nsga2_rsel = cf.getfloat('NSGA2', 'selectrate')
        else:
            raise ValueError('[NSGA2] section MUST exist in the *.ini file.')
        if self.nsga2_npop % 4 != 0:
            raise ValueError('PopulationSize must be a multiple of 4.')
        # 2. MongoDB
        self.hostname = '127.0.0.1'  # localhost by default
        self.port = 27017
        self.spatial_db = ''
        self.bmp_scenario_db = ''
        if 'MONGODB' in cf.sections():
            self.hostname = cf.get('MONGODB', 'hostname')
            self.port = cf.getint('MONGODB', 'port')
            self.spatial_db = cf.get('MONGODB', 'spatialdbname')
            self.bmp_scenario_db = cf.get('MONGODB', 'bmpscenariodbname')
        else:
            raise ValueError('[MONGODB] section MUST exist in the *.ini file.')
        if not StringClass.is_valid_ip_addr(self.hostname):
            raise ValueError('Invalid HOSTNAME defined in [MONGODB]!')

        # 3. SEIMS_Model
        self.model_dir = ''
        self.seims_bin = ''
        self.seims_nthread = 1
        self.seims_lyrmethod = 0
        if 'SEIMS_Model' in cf.sections():
            self.model_dir = cf.get('SEIMS_Model', 'model_dir')
            self.seims_bin = cf.get('SEIMS_Model', 'bin_dir')
            self.seims_nthread = cf.getint('SEIMS_Model', 'threadsnum')
            self.seims_lyrmethod = cf.getint('SEIMS_Model', 'layeringmethod')
        else:
            raise ValueError('[SEIMS_Model] section MUST exist in the *.ini file.')
        if not (FileClass.is_dir_exists(self.model_dir)
                and FileClass.is_dir_exists(self.seims_bin)):
            raise IOError('Please check the directories defined in [SEIMS_Model]. '
                          'BIN_DIR and MODEL_DIR are required!')

        # 4. Application specific setting section [BMPs]
        self.bmps_info = dict()
        self.bmps_rule = False
        self.rule_method = 1
        self.bmps_retain = dict()
        self.export_sce_txt = False
        self.export_sce_tif = False
        if 'BMPs' in cf.sections():
            bmpsinfostr = cf.get('BMPs', 'bmps_info')
            self.bmps_rule = cf.getboolean('BMPs', 'bmps_rule')
            if cf.has_option('BMPs', 'rule_method'):
                self.rule_method = cf.getint('BMPs', 'rule_method')
            if cf.has_option('BMPs', 'bmps_retain'):
                bmpsretainstr = cf.get('BMPs', 'bmps_retain')
                self.bmps_retain = json.loads(bmpsretainstr)
                self.bmps_retain = UtilClass.decode_strs_in_dict(
                    self.bmps_retain)
            if cf.has_option('BMPs', 'export_scenario_txt'):
                self.export_sce_txt = cf.getboolean('BMPs',
                                                    'export_scenario_txt')
            if cf.has_option('BMPs', 'export_scenario_tif'):
                self.export_sce_tif = cf.getboolean('BMPs',
                                                    'export_scenario_tif')
        else:
            raise ValueError("[BMPs] section MUST be existed for specific SA.")
        self.bmps_info = json.loads(bmpsinfostr)
        self.bmps_info = UtilClass.decode_strs_in_dict(self.bmps_info)

        # 5. Application specific setting section [Effectiveness]
        self.worst_econ = 0
        self.worst_env = 0
        self.runtime_years = 0
        if 'Effectiveness' in cf.sections():
            self.worst_econ = cf.getfloat('Effectiveness', 'worst_economy')
            self.worst_env = cf.getfloat('Effectiveness', 'worst_environment')
            self.runtime_years = cf.getfloat('Effectiveness', 'runtime_years')

        # 6. define gene_values
        fn = 'Gen_%d_Pop_%d' % (self.nsga2_ngens, self.nsga2_npop)
        fn += '_rule' if self.bmps_rule else '_random'
        self.nsga2_dir = self.model_dir + os.path.sep + 'NSGA2_OUTPUT' + os.path.sep + fn
        self.scenario_dir = self.nsga2_dir + os.path.sep + 'Scenarios'
        UtilClass.rmmkdir(self.nsga2_dir)
        UtilClass.rmmkdir(self.scenario_dir)
        self.hypervlog = self.nsga2_dir + os.path.sep + 'hypervolume.txt'
        self.scenariolog = self.nsga2_dir + os.path.sep + 'scenarios_info.txt'
        self.logfile = self.nsga2_dir + os.path.sep + 'runtime.log'
        self.logbookfile = self.nsga2_dir + os.path.sep + 'logbook.txt'
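
A hedged usage sketch follows: parse such an *.ini file with configparser and hand it to this constructor. The enclosing class name is outside this excerpt, so SAConfig below is an assumption, as is the config path.

from configparser import ConfigParser

cf = ConfigParser()
cf.read('sa_nsga2_demo.ini')  # hypothetical config file
cfg = SAConfig(cf)            # assumed class name, not shown in this excerpt
print(cfg.nsga2_ngens, cfg.nsga2_npop)  # e.g., 50 generations, population of 48
print(cfg.nsga2_dir)          # .../NSGA2_OUTPUT/Gen_50_Pop_48_rule (or _random)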
Example #44
    def __init__(
            self,
            tag_names,  # type: List[Tuple[int, AnyStr]]
            slpposf,  # type: AnyStr
            reach_shp,  # type: AnyStr
            hillslpf,  # type: AnyStr
            landusef  # type: AnyStr
    ):
        # type: (...) -> None
        """Initialization.

        Args:
            tag_names: list of (tag, name) tuples, in which the integer tags ascend
                       from the upper (e.g., summit) to the lower (e.g., valley) position.
            slpposf: Full filename of the crisp classification of slope position.
            reach_shp: Reach shapefile used to extract the up-down relationships of subbasins.
            hillslpf: Hillslope raster delineated by sd_hillslope.py.
            landusef: Landuse raster, used to compute the area of each landuse type within
                      the slope position units.

        Attributes:
            slppos_tags(OrderedDict): {tag: name}
            subbsin_tree: downstream relationships of subbasins,
                          {subbsnID: downstream subbsnID}
            units_updwon: Output JSON data of slope position units, e.g.,
                {"slppos_1": {id: {"downslope": [ids], "upslope": [ids], "landuse": {luID: area},
                                   "hillslope": [hillslpID], "subbasin": [subbsnID], "area": area}
                             },
                 "slppos_2": ...
                }
        """
        # Check the file existence
        FileClass.check_file_exists(slpposf)
        FileClass.check_file_exists(reach_shp)
        FileClass.check_file_exists(hillslpf)
        FileClass.check_file_exists(landusef)
        # Set inputs
        self.ws = os.path.dirname(slpposf)
        tag_names = sorted(tag_names, key=lambda x: x[0])
        # initialize slope position dict with up-down relationships
        self.slppos_tags = OrderedDict()  # type: Dict[int, Dict[AnyStr, Union[int, AnyStr]]]
        for idx, tagname in enumerate(tag_names):
            tag, name = tagname
            if len(tag_names) > 1:
                if idx == 0:
                    self.slppos_tags[int(tag)] = {
                        'name': name,
                        'upslope': -1,
                        'downslope': tag_names[idx + 1][0]
                    }
                elif idx == len(tag_names) - 1:
                    self.slppos_tags[int(tag)] = {
                        'name': name,
                        'upslope': tag_names[idx - 1][0],
                        'downslope': -1
                    }
                else:
                    self.slppos_tags[int(tag)] = {
                        'name': name,
                        'upslope': tag_names[idx - 1][0],
                        'downslope': tag_names[idx + 1][0]
                    }
            else:
                self.slppos_tags[int(tag)] = {
                    'name': name,
                    'upslope': -1,
                    'downslope': -1
                }

        self.reach = reach_shp
        # read raster data and check the extent based on hillslope.
        hillslpr = RasterUtilClass.read_raster(hillslpf)
        self.data_hillslp = hillslpr.data
        self.nrows = hillslpr.nRows
        self.ncols = hillslpr.nCols
        self.dx = hillslpr.dx
        self.nodata_hillslp = hillslpr.noDataValue
        self.geotrans = hillslpr.geotrans
        self.srs = hillslpr.srs
        self.datatype = hillslpr.dataType
        slpposr = RasterUtilClass.read_raster(slpposf)
        if slpposr.nRows != self.nrows or slpposr.nCols != self.ncols:
            raise ValueError('The slope position raster MUST have the same dimensions'
                             ' as the hillslope raster!')
        self.data_slppos = slpposr.data
        self.nodata_slppos = slpposr.noDataValue
        landuser = RasterUtilClass.read_raster(landusef)
        if landuser.nRows != self.nrows or landuser.nCols != self.ncols:
            raise ValueError('The landuse raster MUST have the same dimensions'
                             ' as the hillslope raster!')
        self.data_landuse = landuser.data
        self.nodata_landuse = landuser.noDataValue

        # Set intermediate data
        self.subbsin_num = -1
        self.subbsin_tree = dict()  # type: Dict[int, int]  # {subbsnID: dst_subbsnID}
        self.units_updwon = OrderedDict()  # type: Dict[AnyStr, Dict[int, Dict[AnyStr, Union[List[float], AnyStr]]]]
        for tag in self.slppos_tags:
            self.units_updwon[self.slppos_tags.get(tag).get('name')] = dict()
        self.slppos_ids = numpy.ones((self.nrows, self.ncols)) * DEFAULT_NODATA
        self.hierarchy_units = dict()  # type: Dict[int, Dict[int, Dict[AnyStr, int]]]

        # Set gene_values of outputs
        self.outf_units_origin = self.ws + os.path.sep + 'slppos_units_origin_uniqueid.tif'
        self.outshp_units_origin = self.ws + os.path.sep + 'origin_uniqueid.shp'
        self.json_units_origin = self.ws + os.path.sep + 'original_updown.json'
        self.outf_units_merged = self.ws + os.path.sep + 'slppos_units.tif'
        self.outshp_units_merged = self.ws + os.path.sep + 'slppos_units_merged.shp'
        self.json_units_merged = self.ws + os.path.sep + 'updown.json'
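
A hedged instantiation sketch, assuming the enclosing class is named SlopePositionUnits and using a common three-position tagging; the tag values and file names are illustrative only.

# tags ascend from the upper (summit) to the lower (valley) position
tag_names = [(1, 'summit'), (4, 'backslope'), (16, 'valley')]
spunits = SlopePositionUnits(tag_names,
                             'slppos_crisp.tif',  # hypothetical crisp slope-position raster
                             'reach.shp',         # reach shapefile
                             'hillslope.tif',     # hillslopes from sd_hillslope.py
                             'landuse.tif')       # landuse raster with the same extent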
Example #45
def pre_processing(cfg):
    start_t = time.time()
    if not cfg.flag_preprocess:
        return 0
    single_basin = False
    if cfg.outlet is not None:
        single_basin = True
    pretaudem_done = check_watershed_delineation_results(cfg)
    if cfg.valley is None or not FileClass.is_file_exists(cfg.valley) or not pretaudem_done:
        cfg.valley = cfg.pretaudem.stream_raster
        # Watershed delineation based on D8 flow model.
        TauDEMWorkflow.watershed_delineation(cfg.proc, cfg.dem, cfg.outlet, cfg.d8_stream_thresh,
                                             single_basin,
                                             cfg.ws.pre_dir, cfg.mpi_dir, cfg.bin_dir,
                                             logfile=cfg.log.preproc, runtime_file=cfg.log.runtime,
                                             hostfile=cfg.hostfile)
    # use outlet_m or not
    outlet_use = None
    if single_basin:
        outlet_use = cfg.pretaudem.outlet_m
    log_status = open(cfg.log.preproc, 'a', encoding='utf-8')
    log_status.write('Calculating RPI(Relative Position Index)...\n')
    log_status.flush()
    if cfg.flow_model == 1:  # Dinf model, extract stream using the D8 threshold
        if cfg.valley is None or not FileClass.is_file_exists(cfg.valley):
            if cfg.d8_stream_thresh <= 0:
                with open(cfg.pretaudem.drptxt, 'r', encoding='utf-8') as drpf:
                    temp_contents = drpf.read()
                    (beg, cfg.d8_stream_thresh) = temp_contents.rsplit(' ', 1)
            print(cfg.d8_stream_thresh)
            TauDEMExtension.areadinf(cfg.proc, cfg.pretaudem.dinf,
                                     cfg.pretaudem.dinfacc_weight, outlet_use,
                                     cfg.pretaudem.stream_pd, 'false',
                                     cfg.ws.pre_dir, cfg.mpi_dir, cfg.bin_dir,
                                     cfg.log.preproc, cfg.log.runtime, cfg.hostfile)
            TauDEMExtension.threshold(cfg.proc, cfg.pretaudem.dinfacc_weight,
                                      cfg.pretaudem.stream_dinf, float(cfg.d8_stream_thresh),
                                      cfg.ws.pre_dir, cfg.mpi_dir, cfg.bin_dir,
                                      cfg.log.preproc, cfg.log.runtime, cfg.hostfile)
            cfg.valley = cfg.pretaudem.stream_dinf
        # calculate Height Above the Nearest Drainage (HAND)
        TauDEMExtension.dinfdistdown(cfg.proc, cfg.pretaudem.dinf, cfg.pretaudem.filldem,
                                     cfg.pretaudem.dinf_slp, cfg.valley,
                                     cfg.dinf_down_stat, 'v', 'false',
                                     cfg.dinf_dist_down_wg, cfg.pretaudem.dist2stream_v,
                                     cfg.ws.pre_dir, cfg.mpi_dir, cfg.bin_dir,
                                     cfg.log.preproc, cfg.log.runtime, cfg.hostfile)
    else:
        # calculate Height Above the Nearest Drainage (HAND)
        TauDEMExtension.d8distdowntostream(cfg.proc, cfg.pretaudem.d8flow,
                                           cfg.pretaudem.filldem, cfg.valley,
                                           cfg.pretaudem.dist2stream_v, 'v', 1,
                                           cfg.ws.pre_dir, cfg.mpi_dir, cfg.bin_dir,
                                           cfg.log.preproc, cfg.log.runtime, cfg.hostfile)
    if cfg.rpi_method == 1:  # calculate RPI based on hydrological proximity measures (Default).
        if cfg.flow_model == 0:  # D8 model
            TauDEMExtension.d8distdowntostream(cfg.proc, cfg.pretaudem.d8flow,
                                               cfg.pretaudem.filldem, cfg.valley,
                                               cfg.pretaudem.dist2stream, cfg.d8_down_method, 1,
                                               cfg.ws.pre_dir, cfg.mpi_dir, cfg.bin_dir,
                                               cfg.log.preproc, cfg.log.runtime, cfg.hostfile)
            TauDEMExtension.d8distuptoridge(cfg.proc, cfg.pretaudem.d8flow,
                                            cfg.pretaudem.filldem, cfg.ridge,
                                            cfg.pretaudem.distup2rdg, cfg.d8_up_method,
                                            cfg.ws.pre_dir, cfg.mpi_dir, cfg.bin_dir,
                                            cfg.log.preproc, cfg.log.runtime, cfg.hostfile)
        elif cfg.flow_model == 1:  # Dinf model
            # Dinf distance down
            TauDEMExtension.dinfdistdown(cfg.proc, cfg.pretaudem.dinf, cfg.pretaudem.filldem,
                                         cfg.pretaudem.dinf_slp, cfg.valley,
                                         cfg.dinf_down_stat, cfg.dinf_down_method, 'false',
                                         cfg.dinf_dist_down_wg, cfg.pretaudem.dist2stream,
                                         cfg.ws.pre_dir, cfg.mpi_dir, cfg.bin_dir,
                                         cfg.log.preproc, cfg.log.runtime, cfg.hostfile)
            TauDEMExtension.dinfdistuptoridge(cfg.proc, cfg.pretaudem.dinf,
                                              cfg.pretaudem.filldem, cfg.pretaudem.dinf_slp,
                                              cfg.propthresh, cfg.pretaudem.distup2rdg,
                                              cfg.dinf_up_stat, cfg.dinf_up_method, 'false',
                                              cfg.ridge,
                                              cfg.ws.pre_dir, cfg.mpi_dir, cfg.bin_dir,
                                              cfg.log.preproc, cfg.log.runtime, cfg.hostfile)
        TauDEMExtension.simplecalculator(cfg.proc, cfg.pretaudem.dist2stream,
                                         cfg.pretaudem.distup2rdg, cfg.pretaudem.rpi_hydro, 4,
                                         cfg.ws.pre_dir, cfg.mpi_dir, cfg.bin_dir,
                                         cfg.log.preproc, cfg.log.runtime, cfg.hostfile)
    if cfg.rpi_method == 0:  # calculate RPI based on Skidmore's method
        if cfg.ridge is None or not FileClass.is_file_exists(cfg.ridge):
            cfg.ridge = cfg.pretaudem.rdgsrc
            angfile = cfg.pretaudem.d8flow
            elevfile = cfg.pretaudem.dist2stream_v
            if cfg.flow_model == 1:  # D-inf model
                angfile = cfg.pretaudem.dinf
                elevfile = cfg.pretaudem.dist2stream_v
            TauDEMExtension.extractridge(cfg.proc, angfile, elevfile, cfg.ridge,
                                         cfg.ws.pre_dir, cfg.mpi_dir, cfg.bin_dir,
                                         cfg.log.preproc, cfg.log.runtime, cfg.hostfile)
        TauDEMExtension.rpiskidmore(cfg.proc, cfg.valley, cfg.ridge,
                                    cfg.pretaudem.rpi_skidmore, 1, 1,
                                    cfg.pretaudem.dist2stream_ed, cfg.pretaudem.dist2rdg_ed,
                                    cfg.ws.pre_dir, cfg.mpi_dir, cfg.bin_dir,
                                    cfg.log.preproc, cfg.log.runtime, cfg.hostfile)
    log_status.write('Calculating Horizontal Curvature and Profile Curvature...\n')
    TauDEMExtension.curvature(cfg.proc, cfg.pretaudem.filldem,
                              cfg.topoparam.profc, cfg.topoparam.horizc,
                              None, None, None, None, None,
                              cfg.ws.pre_dir, cfg.mpi_dir, cfg.bin_dir,
                              cfg.log.preproc, cfg.log.runtime, cfg.hostfile)

    if cfg.flow_model == 0:
        slope_rad_to_deg(cfg.pretaudem.slp, cfg.topoparam.slope)
    elif cfg.flow_model == 1:
        slope_rad_to_deg(cfg.pretaudem.dinf_slp, cfg.topoparam.slope)
    if cfg.rpi_method == 1:
        copy2(cfg.pretaudem.rpi_hydro, cfg.topoparam.rpi)
    else:
        copy2(cfg.pretaudem.rpi_skidmore, cfg.topoparam.rpi)
    copy2(cfg.pretaudem.dist2stream_v, cfg.topoparam.hand)
    copy2(cfg.pretaudem.filldem, cfg.topoparam.elev)

    if single_basin:  # clip RPI
        RasterUtilClass.mask_raster(cfg.topoparam.rpi, cfg.pretaudem.subbsn, cfg.topoparam.rpi)

    log_status.write('Preprocessing succeeded!\n')
    end_t = time.time()
    cost = (end_t - start_t) / 60.
    log_status.write('Time consuming: %.2f min.\n' % cost)
    log_status.close()
    with open(cfg.log.runtime, 'a', encoding='utf-8') as logf:
        logf.write('Preprocessing Time-consuming: %s\n' % repr(cost))
    return cost
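
For intuition about the hydrological-proximity branch (rpi_method == 1): with a downslope distance-to-stream grid and an upslope distance-to-ridge grid, the relative position index is presumably d_down / (d_down + d_up), so a cell on the stream scores 0 and a cell on the ridge scores 1. A minimal numpy sketch under that assumption (not the TauDEM implementation itself):

import numpy as np

def rpi_from_distances(d_down, d_up, nodata=-9999.):
    """Assumed RPI = d_down / (d_down + d_up); nodata cells pass through."""
    d_down = np.asarray(d_down, dtype='float64')
    d_up = np.asarray(d_up, dtype='float64')
    rpi = np.full(d_down.shape, nodata)
    valid = (d_down != nodata) & (d_up != nodata) & (d_down + d_up > 0)
    rpi[valid] = d_down[valid] / (d_down[valid] + d_up[valid])
    return rpi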
Example #46
def main(landusef, unitsf, jsonout):
    """Construct common spatial units data in JSON file format."""
    # Check the file existence
    FileClass.check_file_exists(landusef)
    FileClass.check_file_exists(unitsf)
    # read raster data and check the extent based on landuse.
    landuser = RasterUtilClass.read_raster(landusef)
    data_landuse = landuser.data
    nrows = landuser.nRows
    ncols = landuser.nCols
    dx = landuser.dx
    nodata_landuse = landuser.noDataValue

    fieldr = RasterUtilClass.read_raster(unitsf)
    if fieldr.nRows != nrows or fieldr.nCols != ncols:
        raise ValueError('The spatial units raster MUST have the same dimensions'
                         ' as the landuse raster!')
    data_units = fieldr.data
    nodata_units = fieldr.noDataValue

    units_info = dict()  # type: Dict[AnyStr, Dict[Union[int, AnyStr], Dict[AnyStr, Union[int, float, List[Union[int,float]], AnyStr, Dict[int, float]]]]]

    units_info.setdefault('units', dict())
    units_info.setdefault('overview', dict())

    units_ids = list()  # type: List[int]

    for m in range(nrows):
        for n in range(ncols):
            cur_lu = int(data_landuse[m][n])
            cur_unit = int(data_units[m][n])
            if cur_unit == nodata_units or cur_lu == nodata_landuse or cur_lu <= 0:
                continue
            if cur_unit not in units_ids:
                units_ids.append(cur_unit)
            if cur_unit not in units_info['units']:
                units_info['units'].setdefault(cur_unit, {'landuse': dict(),
                                                          'primarylanduse': 0,
                                                          'area': 0.})
            if cur_lu not in units_info['units'][cur_unit]['landuse']:
                units_info['units'][cur_unit]['landuse'][cur_lu] = 1
            else:
                units_info['units'][cur_unit]['landuse'][cur_lu] += 1
    for k, v in viewitems(units_info['units']):
        area_field = 0.
        area_max = 0.
        area_max_lu = 0
        for luid, luarea in viewitems(v['landuse']):
            v['landuse'][luid] = luarea * dx * dx * 1.e-6
            area_field += v['landuse'][luid]
            if v['landuse'][luid] > area_max:
                area_max = v['landuse'][luid]
                area_max_lu = luid
        v['area'] = area_field
        v['primarylanduse'] = area_max_lu

    units_info['overview'].setdefault('all_units', len(units_ids))

    # save to json
    json_data = json.dumps(units_info, indent=4)
    with open(jsonout, 'w', encoding='utf-8') as f:
        f.write('%s' % json_data)
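
A hedged invocation sketch with illustrative paths; the resulting JSON maps every unit ID to its per-landuse areas (km2, given dx in meters), primary landuse, and total area.

if __name__ == '__main__':
    main('landuse.tif',             # hypothetical landuse raster
         'spatial_units.tif',       # hypothetical units raster, same dimensions
         'spatial_units_info.json') # output JSON path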
Example #47
File: TauDEM.py Project: alameday/PyGeoC
    def watershed_delineation(np, dem, outlet_file=None, thresh=0, singlebasin=False,
                              workingdir=None, mpi_bin=None, bin_dir=None,
                              logfile=None, runtime_file=None, hostfile=None):
        """Watershed Delineation."""
        # 1. Check directories
        if not os.path.exists(dem):
            TauDEM.error('DEM: %s does not exist!' % dem)
        dem = os.path.abspath(dem)
        if workingdir is None:
            workingdir = os.path.dirname(dem)
        namecfg = TauDEMFilesUtils(workingdir)
        workingdir = namecfg.workspace
        UtilClass.mkdir(workingdir)
        # 2. Check log file
        if logfile is not None and FileClass.is_file_exists(logfile):
            os.remove(logfile)
        # 3. Get predefined intermediate file names
        filled_dem = namecfg.filldem
        flow_dir = namecfg.d8flow
        slope = namecfg.slp
        flow_dir_dinf = namecfg.dinf
        slope_dinf = namecfg.dinf_slp
        dir_code_dinf = namecfg.dinf_d8dir
        weight_dinf = namecfg.dinf_weight
        acc = namecfg.d8acc
        stream_raster = namecfg.stream_raster
        default_outlet = namecfg.outlet_pre
        modified_outlet = namecfg.outlet_m
        stream_skeleton = namecfg.stream_pd
        acc_with_weight = namecfg.d8acc_weight
        stream_order = namecfg.stream_order
        ch_network = namecfg.channel_net
        ch_coord = namecfg.channel_coord
        stream_net = namecfg.streamnet_shp
        subbasin = namecfg.subbsn
        dist2_stream_d8 = namecfg.dist2stream_d8

        # 4. perform calculation
        UtilClass.writelog(logfile, "[Output] %d..., %s" % (10, "pitremove DEM..."), 'a')
        TauDEM.pitremove(np, dem, filled_dem, workingdir, mpi_bin, bin_dir,
                         log_file=logfile, runtime_file=runtime_file, hostfile=hostfile)
        UtilClass.writelog(logfile, "[Output] %d..., %s" %
                           (20, "Calculating D8 and Dinf flow direction..."), 'a')
        TauDEM.d8flowdir(np, filled_dem, flow_dir, slope, workingdir,
                         mpi_bin, bin_dir, log_file=logfile,
                         runtime_file=runtime_file, hostfile=hostfile)
        TauDEM.dinfflowdir(np, filled_dem, flow_dir_dinf, slope_dinf, workingdir,
                           mpi_bin, bin_dir, log_file=logfile,
                           runtime_file=runtime_file, hostfile=hostfile)
        DinfUtil.output_compressed_dinf(flow_dir_dinf, dir_code_dinf, weight_dinf)
        UtilClass.writelog(logfile, "[Output] %d..., %s" % (30, "D8 flow accumulation..."), 'a')
        TauDEM.aread8(np, flow_dir, acc, None, None, False, workingdir, mpi_bin, bin_dir,
                      log_file=logfile, runtime_file=runtime_file, hostfile=hostfile)
        UtilClass.writelog(logfile, "[Output] %d..., %s" %
                           (40, "Generating stream raster initially..."), 'a')
        min_accum, max_accum, mean_accum, std_accum = RasterUtilClass.raster_statistics(acc)
        TauDEM.threshold(np, acc, stream_raster, mean_accum, workingdir,
                         mpi_bin, bin_dir, log_file=logfile,
                         runtime_file=runtime_file, hostfile=hostfile)
        UtilClass.writelog(logfile, "[Output] %d..., %s" % (50, "Moving outlet to stream..."), 'a')
        if outlet_file is None:
            outlet_file = default_outlet
            TauDEM.connectdown(np, flow_dir, acc, outlet_file, wtsd=None,
                               workingdir=workingdir, mpiexedir=mpi_bin, exedir=bin_dir,
                               log_file=logfile, runtime_file=runtime_file, hostfile=hostfile)
        TauDEM.moveoutletstostrm(np, flow_dir, stream_raster, outlet_file,
                                 modified_outlet, workingdir, mpi_bin, bin_dir,
                                 log_file=logfile, runtime_file=runtime_file, hostfile=hostfile)
        UtilClass.writelog(logfile, "[Output] %d..., %s" %
                           (60, "Generating stream skeleton..."), 'a')
        TauDEM.peukerdouglas(np, filled_dem, stream_skeleton, workingdir,
                             mpi_bin, bin_dir, log_file=logfile,
                             runtime_file=runtime_file, hostfile=hostfile)
        UtilClass.writelog(logfile, "[Output] %d..., %s" %
                           (70, "Flow accumulation with outlet..."), 'a')
        tmp_outlet = None
        if singlebasin:
            tmp_outlet = modified_outlet
        TauDEM.aread8(np, flow_dir, acc_with_weight, tmp_outlet, stream_skeleton, False,
                      workingdir, mpi_bin, bin_dir, log_file=logfile,
                      runtime_file=runtime_file, hostfile=hostfile)

        if thresh <= 0:  # find the optimal threshold using dropanalysis function
            UtilClass.writelog(logfile, "[Output] %d..., %s" %
                               (75, "Drop analysis to select optimal threshold..."), 'a')
            min_accum, max_accum, mean_accum, std_accum = \
                RasterUtilClass.raster_statistics(acc_with_weight)
            if mean_accum - std_accum < 0:
                minthresh = mean_accum
            else:
                minthresh = mean_accum - std_accum
            maxthresh = mean_accum + std_accum
            numthresh = 20
            logspace = 'true'
            drp_file = namecfg.drptxt
            TauDEM.dropanalysis(np, filled_dem, flow_dir, acc_with_weight,
                                acc_with_weight, modified_outlet, minthresh, maxthresh,
                                numthresh, logspace, drp_file, workingdir, mpi_bin, bin_dir,
                                log_file=logfile, runtime_file=runtime_file, hostfile=hostfile)
            if not FileClass.is_file_exists(drp_file):
                raise RuntimeError("Dropanalysis failed and drp.txt was not created!")
            with open(drp_file, 'r', encoding='utf-8') as drpf:
                temp_contents = drpf.read()
                (beg, thresh) = temp_contents.rsplit(' ', 1)
            print(thresh)
        UtilClass.writelog(logfile, "[Output] %d..., %s" % (80, "Generating stream raster..."), 'a')
        TauDEM.threshold(np, acc_with_weight, stream_raster, float(thresh),
                         workingdir, mpi_bin, bin_dir, log_file=logfile,
                         runtime_file=runtime_file, hostfile=hostfile)
        UtilClass.writelog(logfile, "[Output] %d..., %s" % (90, "Generating stream net..."), 'a')
        TauDEM.streamnet(np, filled_dem, flow_dir, acc_with_weight, stream_raster,
                         modified_outlet, stream_order, ch_network,
                         ch_coord, stream_net, subbasin, workingdir, mpi_bin, bin_dir,
                         log_file=logfile, runtime_file=runtime_file, hostfile=hostfile)
        UtilClass.writelog(logfile, "[Output] %d..., %s" %
                           (95, "Calculating distance to stream (D8)..."), 'a')
        TauDEM.d8hdisttostrm(np, flow_dir, stream_raster, dist2_stream_d8, 1,
                             workingdir, mpi_bin, bin_dir, log_file=logfile,
                             runtime_file=runtime_file, hostfile=hostfile)
        UtilClass.writelog(logfile, "[Output] %d.., %s" %
                           (100, "Original subbasin delineation is finished!"), 'a')
Example #48
    def watershed_delineation(np, dem, outlet_file=None, thresh=0, singlebasin=False,
                              workingdir=None, mpi_bin=None, bin_dir=None,
                              logfile=None, runtime_file=None, hostfile=None,
                              avoid_redo=False):
        """Watershed Delineation based on D8 flow direction.

        Args:
            np: process number for MPI
            dem: DEM path
            outlet_file: predefined outlet shapefile path
            thresh: predefined threshold for extracting the stream network from the
                    flow accumulation raster
            singlebasin: when set True, only extract the subbasins that drain into the
                         predefined outlet
            workingdir: directory that store outputs
            mpi_bin: directory of MPI executable binary, e.g., mpiexec, mpirun
            bin_dir: directory of TauDEM and other executable binaries
            logfile: log file path
            runtime_file: runtime file path
            hostfile: host list file path for MPI
            avoid_redo: skip steps whose outputs already exist and do not depend on the
                        input arguments when this function is invoked repeatedly
        """
        # 1. Check directories
        if not os.path.exists(dem):
            TauDEM.error('DEM: %s does not exist!' % dem)
        dem = os.path.abspath(dem)
        if workingdir is None or workingdir == '':
            workingdir = os.path.dirname(dem)
        nc = TauDEMFilesUtils(workingdir)  # predefined names
        workingdir = nc.workspace
        UtilClass.mkdir(workingdir)
        # 2. Check log file
        if logfile is not None and FileClass.is_file_exists(logfile):
            os.remove(logfile)
        # 3. perform calculation
        # Filling DEM
        if not (avoid_redo and FileClass.is_file_exists(nc.filldem)):
            UtilClass.writelog(logfile, '[Output] %s' % 'remove pit...', 'a')
            TauDEM.pitremove(np, dem, nc.filldem, workingdir, mpi_bin, bin_dir,
                             log_file=logfile, runtime_file=runtime_file, hostfile=hostfile)
        # Flow direction based on D8 algorithm
        if not (avoid_redo and FileClass.is_file_exists(nc.d8flow)):
            UtilClass.writelog(logfile, '[Output] %s' % 'D8 flow direction...', 'a')
            TauDEM.d8flowdir(np, nc.filldem, nc.d8flow, nc.slp, workingdir,
                             mpi_bin, bin_dir, log_file=logfile,
                             runtime_file=runtime_file, hostfile=hostfile)
        # Flow accumulation without stream skeleton as weight
        if not (avoid_redo and FileClass.is_file_exists(nc.d8acc)):
            UtilClass.writelog(logfile, '[Output] %s' % 'D8 flow accumulation...', 'a')
            TauDEM.aread8(np, nc.d8flow, nc.d8acc, None, None, False, workingdir, mpi_bin, bin_dir,
                          log_file=logfile, runtime_file=runtime_file, hostfile=hostfile)
        # Initial stream network using mean accumulation as threshold
        UtilClass.writelog(logfile, '[Output] %s' % 'Generating stream raster initially...', 'a')
        min_accum, max_accum, mean_accum, std_accum = RasterUtilClass.raster_statistics(nc.d8acc)
        TauDEM.threshold(np, nc.d8acc, nc.stream_raster, mean_accum, workingdir,
                         mpi_bin, bin_dir, log_file=logfile,
                         runtime_file=runtime_file, hostfile=hostfile)
        # Outlets position initialization and adjustment
        UtilClass.writelog(logfile, '[Output] %s' % 'Moving outlet to stream...', 'a')
        if outlet_file is None:  # if not given, take cell with maximum accumulation as outlet
            outlet_file = nc.outlet_pre
            TauDEM.connectdown(np, nc.d8flow, nc.d8acc, outlet_file, nc.outlet_m, wtsd=None,
                               workingdir=workingdir, mpiexedir=mpi_bin, exedir=bin_dir,
                               log_file=logfile, runtime_file=runtime_file, hostfile=hostfile)
        TauDEM.moveoutletstostrm(np, nc.d8flow, nc.stream_raster, outlet_file,
                                 nc.outlet_m, workingdir=workingdir,
                                 mpiexedir=mpi_bin, exedir=bin_dir,
                                 log_file=logfile, runtime_file=runtime_file, hostfile=hostfile)
        # Stream skeleton by peuker-douglas algorithm
        UtilClass.writelog(logfile, '[Output] %s' % 'Generating stream skeleton ...', 'a')
        TauDEM.peukerdouglas(np, nc.filldem, nc.stream_pd, workingdir,
                             mpi_bin, bin_dir, log_file=logfile,
                             runtime_file=runtime_file, hostfile=hostfile)
        # Weighted flow accumulation with outlet
        UtilClass.writelog(logfile, '[Output] %s' % 'Flow accumulation with outlet...', 'a')
        tmp_outlet = None
        if singlebasin:
            tmp_outlet = nc.outlet_m
        TauDEM.aread8(np, nc.d8flow, nc.d8acc_weight, tmp_outlet, nc.stream_pd, False,
                      workingdir, mpi_bin, bin_dir, log_file=logfile,
                      runtime_file=runtime_file, hostfile=hostfile)
        # Determine threshold by input argument or dropanalysis function
        if thresh <= 0:  # find the optimal threshold using dropanalysis function
            UtilClass.writelog(logfile, '[Output] %s' %
                               'Drop analysis to select optimal threshold...', 'a')
            min_accum, max_accum, mean_accum, std_accum = \
                RasterUtilClass.raster_statistics(nc.d8acc_weight)
            if mean_accum - std_accum < 0:
                minthresh = mean_accum
            else:
                minthresh = mean_accum - std_accum
            maxthresh = mean_accum + std_accum
            TauDEM.dropanalysis(np, nc.filldem, nc.d8flow, nc.d8acc_weight,
                                nc.d8acc_weight, nc.outlet_m, minthresh, maxthresh,
                                20, 'true', nc.drptxt, workingdir, mpi_bin, bin_dir,
                                log_file=logfile, runtime_file=runtime_file, hostfile=hostfile)
            if not FileClass.is_file_exists(nc.drptxt):
                # raise RuntimeError('Dropanalysis failed and drp.txt was not created!')
                UtilClass.writelog(logfile, '[Output] %s' %
                                   'dropanalysis failed!', 'a')
                thresh = 0.5 * (maxthresh - minthresh) + minthresh
            else:
                with open(nc.drptxt, 'r', encoding='utf-8') as drpf:
                    temp_contents = drpf.read()
                    (beg, thresh) = temp_contents.rsplit(' ', 1)
            thresh = float(thresh)
            UtilClass.writelog(logfile, '[Output] %s: %f' %
                               ('Selected optimal threshold', thresh), 'a')
        # Final stream network
        UtilClass.writelog(logfile, '[Output] %s' % 'Generating stream raster...', 'a')
        TauDEM.threshold(np, nc.d8acc_weight, nc.stream_raster, thresh,
                         workingdir, mpi_bin, bin_dir, log_file=logfile,
                         runtime_file=runtime_file, hostfile=hostfile)
        UtilClass.writelog(logfile, '[Output] %s' % 'Generating stream net...', 'a')
        TauDEM.streamnet(np, nc.filldem, nc.d8flow, nc.d8acc_weight, nc.stream_raster,
                         nc.outlet_m, nc.stream_order, nc.channel_net,
                         nc.channel_coord, nc.streamnet_shp, nc.subbsn,
                         workingdir, mpi_bin, bin_dir,
                         log_file=logfile, runtime_file=runtime_file, hostfile=hostfile)
        # Serialize IDs of subbasins and the corresponding streams
        UtilClass.writelog(logfile, '[Output] %s' % 'Serialize subbasin&stream IDs...', 'a')
        id_map = StreamnetUtil.serialize_streamnet(nc.streamnet_shp, nc.streamnet_m)
        RasterUtilClass.raster_reclassify(nc.subbsn, id_map, nc.subbsn_m, GDT_Int32)
        StreamnetUtil.assign_stream_id_raster(nc.stream_raster, nc.subbsn_m, nc.stream_m)
        # convert raster to shapefile (for subbasin and basin)
        UtilClass.writelog(logfile, '[Output] %s' % 'Generating subbasin vector...', 'a')
        VectorUtilClass.raster2shp(nc.subbsn_m, nc.subbsn_shp, 'subbasin', 'SUBBASINID')
        # Finish the workflow
        UtilClass.writelog(logfile, '[Output] %s' %
                           'Original subbasin delineation is finished!', 'a')
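
A hedged re-run sketch of the avoid_redo flag: a second call in the same workspace skips pit removal, D8 flow direction, and the unweighted accumulation when their output rasters already exist, so only the threshold-dependent steps are recomputed. The unqualified call below assumes the method is in scope.

# first run: every step executes, threshold chosen by drop analysis
watershed_delineation(4, 'dem.tif', thresh=0, workingdir='ws_demo')
# second run with a manual threshold: the filled DEM, flow direction, and
# unweighted accumulation are reused instead of being recomputed
watershed_delineation(4, 'dem.tif', thresh=500, workingdir='ws_demo', avoid_redo=True)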