Code example #1
def write_autofuzslppos_config_file(ini_name, bin, wp, data_path, dem_name):
    UtilClass.mkdir(wp)
    org_ini_file = data_path + os.sep + ini_name
    demf = data_path + os.sep + 'inputs' + os.sep + dem_name
    if not os.path.isfile(org_ini_file):
        print('%s does not exist!' % org_ini_file)
        exit(-1)
    dst_int_file = wp + os.sep + ini_name

    cfg_items = list()
    with open(org_ini_file, 'r') as f:
        for line in f.readlines():
            cfg_items.append(line.strip())
    # print cfg_items
    cfg_items.append('[REQUIRED]')
    cfg_items.append('exeDir = %s' % bin)
    cfg_items.append('rootDir = %s' % wp)
    cfg_items.append('rawdem = %s' % demf)

    with open(dst_int_file, 'w') as f:
        for item in cfg_items:
            f.write(item + '\n')

    cf = ConfigParser()
    cf.read(dst_int_file)
    return AutoFuzSlpPosConfig(cf)
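A minimal usage sketch for the function above. Every path and file name here is a placeholder for illustration; the template ini is assumed to live in data_path, with the DEM under data_path/inputs:

    bin_dir = '/opt/seims/bin'             # hypothetical executable directory
    data_path = '/data/demo_watershed'     # hypothetical demo data directory
    wp = '/data/demo_watershed/workspace'  # hypothetical working directory
    fuzslppos_cfg = write_autofuzslppos_config_file('autofuzslppos.ini', bin_dir,
                                                    wp, data_path, 'dem.tif')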
Code example #2
File: raster.py Project: crazyzlj/PyGeoC
    def write_gtiff_file(f_name, n_rows, n_cols, data, geotransform, srs, nodata_value,
                         gdal_type=GDT_Float32):
        """Output Raster to GeoTiff format file.

        Args:
            f_name: output gtiff file name.
            n_rows: Row count.
            n_cols: Col count.
            data: 2D array data.
            geotransform: geographic transformation.
            srs: coordinate system.
            nodata_value: nodata value.
            gdal_type (:obj:`pygeoc.raster.GDALDataType`): output raster data type,
                                                                  GDT_Float32 as default.
        """
        UtilClass.mkdir(os.path.dirname(FileClass.get_file_fullpath(f_name)))
        driver = gdal_GetDriverByName(str('GTiff'))
        try:
            ds = driver.Create(f_name, n_cols, n_rows, 1, gdal_type)
        except Exception:
            print('Cannot create output file %s' % f_name)
            return
        ds.SetGeoTransform(geotransform)
        try:
            ds.SetProjection(srs.ExportToWkt())
        except Exception:  # srs may already be a WKT string rather than an osr object
            ds.SetProjection(srs)
        ds.GetRasterBand(1).SetNoDataValue(nodata_value)
        # if data contains numpy.nan, replace it with nodata_value
        if isinstance(data, numpy.ndarray) and data.dtype in [numpy.dtype('int'),
                                                              numpy.dtype('float')]:
            data = numpy.where(numpy.isnan(data), nodata_value, data)
        ds.GetRasterBand(1).WriteArray(data)
        ds = None
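A usage sketch for write_gtiff_file, assuming it is invoked as a RasterUtilClass static method from PyGeoC, as other examples on this page do. The array, geotransform, and EPSG code are illustrative values only; note that the nan replacement above only applies to default int/float dtypes:

    import numpy
    from osgeo import osr
    from pygeoc.raster import RasterUtilClass

    nodata = -9999.
    arr = numpy.full((3, 3), 1.)  # default float64, so the nan check applies
    arr[1][1] = numpy.nan         # will be written as the nodata value
    # geotransform: (x_min, cell size, 0., y_max, 0., -cell size)
    geotrans = (437500., 30., 0., 4514500., 0., -30.)
    srs = osr.SpatialReference()
    srs.ImportFromEPSG(32650)     # UTM zone 50N, purely illustrative
    RasterUtilClass.write_gtiff_file('/tmp/demo.tif', 3, 3, arr, geotrans,
                                     srs, nodata)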
Code example #3
File: raster.py Project: crazyzlj/PyGeoC
    def write_asc_file(filename, data, xsize, ysize, geotransform, nodata_value):
        """Output Raster to ASCII file.

        Args:
            filename: output ASCII filename.
            data: 2D array data.
            xsize: Col count.
            ysize: Row count.
            geotransform: geographic transformation.
            nodata_value: NoData value.
        """
        UtilClass.mkdir(os.path.dirname(FileClass.get_file_fullpath(filename)))
        header = 'NCOLS %d\n' \
                 'NROWS %d\n' \
                 'XLLCENTER %f\n' \
                 'YLLCENTER %f\n' \
                 'CELLSIZE %f\n' \
                 'NODATA_VALUE %f' % (xsize, ysize, geotransform[0] + 0.5 * geotransform[1],
                                      geotransform[3] - (ysize - 0.5) * geotransform[1],
                                      geotransform[1], nodata_value)

        with open(filename, 'w', encoding='utf-8') as f:
            f.write(header)
            for i in range(0, ysize):
                for j in range(0, xsize):
                    f.write('%s\t' % repr(data[i][j]))
                f.write('\n')
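A companion sketch for the ASCII writer above, under the same assumption that it is called via RasterUtilClass; all values are placeholders:

    import numpy
    from pygeoc.raster import RasterUtilClass

    nodata = -9999.
    arr = numpy.full((2, 3), 1.)  # 2 rows (ysize) by 3 columns (xsize)
    geotrans = (437500., 30., 0., 4514500., 0., -30.)  # illustrative
    RasterUtilClass.write_asc_file('/tmp/demo.asc', arr, 3, 2, geotrans, nodata)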
Code example #4
File: sd_delineation.py Project: zhizihuaxia/SEIMS
    def mask_origin_delineated_data(cfg):
        """Mask the original delineated data by Subbasin raster."""
        subbasin_tau_file = cfg.taudems.subbsn
        geodata2dbdir = cfg.dirs.geodata2db
        UtilClass.mkdir(geodata2dbdir)
        mask_file = cfg.spatials.mask
        RasterUtilClass.get_mask_from_raster(subbasin_tau_file, mask_file)
        # Total 12 raster files
        original_files = [
            cfg.taudems.subbsn, cfg.taudems.d8flow, cfg.taudems.stream_raster,
            cfg.taudems.slp, cfg.taudems.filldem, cfg.taudems.d8acc,
            cfg.taudems.stream_order, cfg.taudems.dinf, cfg.taudems.dinf_d8dir,
            cfg.taudems.dinf_slp, cfg.taudems.dinf_weight,
            cfg.taudems.dist2stream_d8
        ]
        # output masked files
        output_files = [
            cfg.taudems.subbsn_m, cfg.taudems.d8flow_m, cfg.taudems.stream_m,
            cfg.spatials.slope, cfg.spatials.filldem, cfg.spatials.d8acc,
            cfg.spatials.stream_order, cfg.spatials.dinf,
            cfg.spatials.dinf_d8dir, cfg.spatials.dinf_slp,
            cfg.spatials.dinf_weight, cfg.spatials.dist2stream_d8
        ]

        default_values = list()
        for i in range(len(original_files)):
            default_values.append(DEFAULT_NODATA)

        # other input rasters need to be masked
        # soil and landuse
        FileClass.check_file_exists(cfg.soil)
        FileClass.check_file_exists(cfg.landuse)
        original_files.append(cfg.soil)
        output_files.append(cfg.spatials.soil_type)
        default_values.append(cfg.default_soil)
        original_files.append(cfg.landuse)
        output_files.append(cfg.spatials.landuse)
        default_values.append(cfg.default_landuse)

        # Additional raster file
        for k, v in cfg.additional_rs.items():
            org_v = v
            if not FileClass.is_file_exists(org_v):
                v = cfg.spatial_dir + os.path.sep + org_v
                if not FileClass.is_file_exists(v):
                    print('WARNING: The additional file %s MUST be located in '
                          'SPATIAL_DATA_DIR, or provided as a full file path!' %
                          k)
                    continue
            original_files.append(v)
            output_files.append(cfg.dirs.geodata2db + os.path.sep + k + '.tif')
            default_values.append(DEFAULT_NODATA)

        config_file = cfg.logs.mask_cfg
        # run mask operation
        print('Mask original delineated data by Subbasin raster...')
        SpatialDelineation.mask_raster_cpp(cfg.seims_bin, mask_file,
                                           original_files, output_files,
                                           default_values, config_file)
Code example #5
    def __init__(self, cf, method='morris'):
        """Initialization."""
        self.method = method
        # 1. SEIMS model related
        self.model = ParseSEIMSConfig(cf)
        # 2. Common settings of parameters sensitivity analysis
        if 'PSA_Settings' not in cf.sections():
            raise ValueError(
                "[PSA_Settings] section MUST be existed in *.ini file.")

        self.evaluate_params = list()
        if cf.has_option('PSA_Settings', 'evaluateparam'):
            eva_str = cf.get('PSA_Settings', 'evaluateparam')
            self.evaluate_params = StringClass.split_string(eva_str, ',')
        else:
            self.evaluate_params = ['Q']  # Default

        self.param_range_def = 'morris_param_rng.def'  # Default
        if cf.has_option('PSA_Settings', 'paramrngdef'):
            self.param_range_def = cf.get('PSA_Settings', 'paramrngdef')
        self.param_range_def = self.model.model_dir + os.path.sep + self.param_range_def
        if not FileClass.is_file_exists(self.param_range_def):
            raise IOError('Ranges of parameters MUST be provided!')

        if not (cf.has_option('PSA_Settings', 'psa_time_start')
                and cf.has_option('PSA_Settings', 'psa_time_end')):
            raise ValueError(
                "Start and end time of PSA MUST be specified in [PSA_Settings]."
            )
        try:
            # UTCTIME
            tstart = cf.get('PSA_Settings', 'psa_time_start')
            tend = cf.get('PSA_Settings', 'psa_time_end')
            self.psa_stime = StringClass.get_datetime(tstart)
            self.psa_etime = StringClass.get_datetime(tend)
        except ValueError:
            raise ValueError('The time format MUST be"YYYY-MM-DD HH:MM:SS".')
        if self.psa_stime >= self.psa_etime:
            raise ValueError("Wrong time settings in [PSA_Settings]!")

        # 3. Parameters settings for specific sensitivity analysis methods
        self.morris = None
        self.fast = None
        if self.method == 'fast':
            self.fast = FASTConfig(cf)
            self.psa_outpath = '%s/PSA_FAST_N%dM%d' % (
                self.model.model_dir, self.fast.N, self.fast.M)
        elif self.method == 'morris':
            self.morris = MorrisConfig(cf)
            self.psa_outpath = '%s/PSA_Morris_N%dL%d' % (
                self.model.model_dir, self.morris.N, self.morris.num_levels)
        # 4. (Optional) Plot settings for matplotlib
        self.plot_cfg = PlotConfig(cf)

        # Do not remove psa_outpath if already existed
        UtilClass.mkdir(self.psa_outpath)
        self.outfiles = PSAOutputs(self.psa_outpath)
Code example #6
File: config.py Project: crazyzlj/SEIMS
 def __init__(self, wp):
     """Initialization."""
     self.param_defs_json = wp + os.path.sep + 'param_defs.json'
     self.param_values_txt = wp + os.path.sep + 'param_values.txt'
     self.output_values_dir = wp + os.path.sep + 'temp_output_values'
     self.output_values_txt = wp + os.path.sep + 'output_values.txt'
     self.psa_si_json = wp + os.path.sep + 'psa_si.json'
     self.psa_si_sort_txt = wp + os.path.sep + 'psa_si_sorted.csv'
     UtilClass.mkdir(self.output_values_dir)
Code example #7
File: utility.py Project: crazyzlj/SEIMS_For_Gitbook
def save_png_eps(plot, wp, name):
    """Save figures, both png and eps formats"""
    eps_dir = wp + os.path.sep + 'eps'
    pdf_dir = wp + os.path.sep + 'pdf'
    UtilClass.mkdir(eps_dir)
    UtilClass.mkdir(pdf_dir)
    for figpath in [wp + os.path.sep + name + '.png',
                    eps_dir + os.path.sep + name + '.eps',
                    pdf_dir + os.path.sep + name + '.pdf']:
        plot.savefig(figpath, dpi=300)
Code example #8
File: config.py Project: crazyzlj/SEIMS
    def __init__(self, cf, method='morris'):
        """Initialization."""
        self.method = method
        # 1. SEIMS model related
        self.model = ParseSEIMSConfig(cf)
        # 2. Common settings of parameters sensitivity analysis
        if 'PSA_Settings' not in cf.sections():
            raise ValueError("[PSA_Settings] section MUST be existed in *.ini file.")

        self.evaluate_params = list()
        if cf.has_option('PSA_Settings', 'evaluateparam'):
            eva_str = cf.get('PSA_Settings', 'evaluateparam')
            self.evaluate_params = StringClass.split_string(eva_str, ',')
        else:
            self.evaluate_params = ['Q']  # Default

        self.param_range_def = 'morris_param_rng.def'  # Default
        if cf.has_option('PSA_Settings', 'paramrngdef'):
            self.param_range_def = cf.get('PSA_Settings', 'paramrngdef')
        self.param_range_def = self.model.model_dir + os.path.sep + self.param_range_def
        if not FileClass.is_file_exists(self.param_range_def):
            raise IOError('Ranges of parameters MUST be provided!')

        if not (cf.has_option('PSA_Settings', 'psa_time_start') and
                cf.has_option('PSA_Settings', 'psa_time_end')):
            raise ValueError("Start and end time of PSA MUST be specified in [PSA_Settings].")
        try:
            # UTCTIME
            tstart = cf.get('PSA_Settings', 'psa_time_start')
            tend = cf.get('PSA_Settings', 'psa_time_end')
            self.psa_stime = StringClass.get_datetime(tstart)
            self.psa_etime = StringClass.get_datetime(tend)
        except ValueError:
            raise ValueError('The time format MUST be"YYYY-MM-DD HH:MM:SS".')
        if self.psa_stime >= self.psa_etime:
            raise ValueError("Wrong time settings in [PSA_Settings]!")

        # 3. Parameters settings for specific sensitivity analysis methods
        self.morris = None
        self.fast = None
        if self.method == 'fast':
            self.fast = FASTConfig(cf)
            self.psa_outpath = '%s/PSA-FAST-N%dM%d' % (self.model.model_dir,
                                                       self.fast.N, self.fast.M)
        elif self.method == 'morris':
            self.morris = MorrisConfig(cf)
            self.psa_outpath = '%s/PSA-Morris-N%dL%dJ%d' % (self.model.model_dir,
                                                            self.morris.N,
                                                            self.morris.num_levels,
                                                            self.morris.grid_jump)

        # Do not remove psa_outpath if already existed
        UtilClass.mkdir(self.psa_outpath)
        self.outfiles = PSAOutputs(self.psa_outpath)
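For reference, a minimal ini fragment that would satisfy the checks in this initializer. The section and option names come from the code above; the values are placeholders, and the extra options required by MorrisConfig or FASTConfig (e.g., sampling sizes) are omitted here:

    [PSA_Settings]
    evaluateparam = Q
    paramrngdef = morris_param_rng.def
    psa_time_start = 2012-01-01 00:00:00
    psa_time_end = 2012-12-31 23:59:59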
Code example #9
File: sd_delineation.py Project: crazyzlj/SEIMS
    def mask_origin_delineated_data(cfg):
        """Mask the original delineated data by Subbasin raster."""
        subbasin_tau_file = cfg.taudems.subbsn
        geodata2dbdir = cfg.dirs.geodata2db
        UtilClass.mkdir(geodata2dbdir)
        mask_file = cfg.spatials.mask
        RasterUtilClass.get_mask_from_raster(subbasin_tau_file, mask_file)
        # Total 12 raster files
        original_files = [cfg.taudems.subbsn, cfg.taudems.d8flow, cfg.taudems.stream_raster,
                          cfg.taudems.slp, cfg.taudems.filldem, cfg.taudems.d8acc,
                          cfg.taudems.stream_order, cfg.taudems.dinf, cfg.taudems.dinf_d8dir,
                          cfg.taudems.dinf_slp, cfg.taudems.dinf_weight,
                          cfg.taudems.dist2stream_d8]
        # output masked files
        output_files = [cfg.taudems.subbsn_m, cfg.taudems.d8flow_m, cfg.taudems.stream_m,
                        cfg.spatials.slope, cfg.spatials.filldem, cfg.spatials.d8acc,
                        cfg.spatials.stream_order, cfg.spatials.dinf, cfg.spatials.dinf_d8dir,
                        cfg.spatials.dinf_slp, cfg.spatials.dinf_weight,
                        cfg.spatials.dist2stream_d8]

        default_values = list()
        for i in range(len(original_files)):
            default_values.append(DEFAULT_NODATA)

        # other input rasters need to be masked
        # soil and landuse
        FileClass.check_file_exists(cfg.soil)
        FileClass.check_file_exists(cfg.landuse)
        original_files.append(cfg.soil)
        output_files.append(cfg.spatials.soil_type)
        default_values.append(cfg.default_soil)
        original_files.append(cfg.landuse)
        output_files.append(cfg.spatials.landuse)
        default_values.append(cfg.default_landuse)

        # Additional raster file
        for k, v in cfg.additional_rs.items():
            org_v = v
            if not FileClass.is_file_exists(org_v):
                v = cfg.spatial_dir + os.path.sep + org_v
                if not FileClass.is_file_exists(v):
                    print('WARNING: The additional file %s MUST be located in '
                          'SPATIAL_DATA_DIR, or provided as a full file path!' % k)
                    continue
            original_files.append(v)
            output_files.append(cfg.dirs.geodata2db + os.path.sep + k + '.tif')
            default_values.append(DEFAULT_NODATA)

        config_file = cfg.logs.mask_cfg
        # run mask operation
        print('Mask original delineated data by Subbasin raster...')
        SpatialDelineation.mask_raster_cpp(cfg.seims_bin, mask_file, original_files,
                                           output_files, default_values, config_file)
Code example #10
File: sd_delineation.py Project: crazyzlj/SEIMS
 def original_delineation(cfg):
     """Original Delineation by calling TauDEM functions"""
     # Check directories
     UtilClass.mkdir(cfg.workspace)
     UtilClass.mkdir(cfg.dirs.log)
     bin_dir = cfg.seims_bin
     mpi_bin = cfg.mpi_bin
     np = cfg.np
     TauDEMWorkflow.watershed_delineation(np, cfg.dem, cfg.outlet_file, cfg.d8acc_threshold,
                                          True,
                                          cfg.dirs.taudem, mpi_bin, bin_dir,
                                          cfg.logs.delineation)
Code example #11
File: plot.py Project: zhizihuaxia/SEIMS
def save_png_eps(plot, wp, name, plot_cfg=None):
    # type: (plt, AnyStr, AnyStr, Optional[PlotConfig]) -> None
    """Save figures, both png and eps formats"""
    # plot.tight_layout()
    if plot_cfg is None:
        plot_cfg = PlotConfig()
    if plot_cfg.plot_cn:
        wp = wp + os.path.sep + 'cn'
        UtilClass.mkdir(wp)
    for fmt in plot_cfg.fmts:
        fmt_dir = wp + os.path.sep + fmt
        UtilClass.mkdir(fmt_dir)
        figpath = fmt_dir + os.path.sep + name + '.' + fmt
        plot.savefig(figpath, dpi=plot_cfg.dpi)
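A usage sketch with matplotlib; the output directory is a placeholder, and the default PlotConfig() is used as in the function body:

    import matplotlib.pyplot as plt

    fig, ax = plt.subplots()
    ax.plot([0, 1, 2], [0, 1, 4])
    save_png_eps(plt, '/tmp/figures', 'demo_curve')  # one subdirectory per format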
Code example #12
File: sd_delineation.py Project: zhizihuaxia/SEIMS
    def post_process_of_delineated_data(cfg):
        """Do some necessary transfer for subbasin, stream, and flow direction raster."""
        # inputs
        stream_net_file = cfg.taudems.streamnet_shp
        subbasin_file = cfg.taudems.subbsn_m
        flow_dir_file_tau = cfg.taudems.d8flow_m
        stream_raster_file = cfg.taudems.stream_m
        # outputs
        # -- shapefile
        shp_dir = cfg.dirs.geoshp
        UtilClass.mkdir(shp_dir)
        # ---- outlet, copy from DirNameUtils.TauDEM
        FileClass.copy_files(cfg.taudems.outlet_m, cfg.vecs.outlet)
        # ---- reaches
        output_reach_file = cfg.vecs.reach
        # ---- subbasins
        subbasin_vector_file = cfg.vecs.subbsn
        # -- raster file
        output_subbasin_file = cfg.spatials.subbsn
        output_flow_dir_file = cfg.spatials.d8flow
        output_stream_link_file = cfg.spatials.stream_link
        output_hillslope_file = cfg.spatials.hillslope

        id_map = StreamnetUtil.serialize_streamnet(stream_net_file,
                                                   output_reach_file)
        RasterUtilClass.raster_reclassify(subbasin_file, id_map,
                                          output_subbasin_file, GDT_Int32)
        StreamnetUtil.assign_stream_id_raster(stream_raster_file,
                                              output_subbasin_file,
                                              output_stream_link_file)

        # Convert D8 encoding rule to ArcGIS
        D8Util.convert_code(flow_dir_file_tau, output_flow_dir_file)

        # convert raster to shapefile (for subbasin and basin)
        print('Generating subbasin vector...')
        VectorUtilClass.raster2shp(output_subbasin_file, subbasin_vector_file,
                                   'subbasin', FieldNames.subbasin_id)
        mask_file = cfg.spatials.mask
        basin_vector = cfg.vecs.bsn
        print('Generating basin vector...')
        VectorUtilClass.raster2shp(mask_file, basin_vector, 'basin',
                                   FieldNames.basin)
        # delineate hillslope
        DelineateHillslope.downstream_method_whitebox(output_stream_link_file,
                                                      flow_dir_file_tau,
                                                      output_hillslope_file)
Code example #13
File: demo_config.py Project: crazyzlj/SEIMS
 def __init__(self, bpath, data_dir_name, model_dir_name):
     self.mpi_bin = None
     self.bin_dir = bpath + os.path.sep + 'bin'
     self.prescript_dir = bpath + os.path.sep + 'seims' + os.path.sep + 'preprocess'
     self.base_dir = bpath + os.path.sep + 'data' + os.path.sep + data_dir_name
     self.cfg_dir = self.base_dir + os.path.sep + 'model_configs'
     self.model_dir = self.base_dir + os.path.sep + model_dir_name
     self.data_dir = self.base_dir + os.path.sep + 'data_prepare'
     self.clim_dir = self.data_dir + os.path.sep + 'climate'
     self.spatial_dir = self.data_dir + os.path.sep + 'spatial'
     self.observe_dir = self.data_dir + os.path.sep + 'observed'
     self.scenario_dir = self.data_dir + os.path.sep + 'scenario'
     self.lookup_dir = self.data_dir + os.path.sep + 'lookup'
     self.workspace = self.base_dir + os.path.sep + 'workspace'
     UtilClass.mkdir(self.workspace)
     print('SEIMS binary location: %s' % self.bin_dir)
     print('Demo data location: %s' % self.base_dir)
     print('Data preprocessing location: %s' % self.workspace)
Code example #14
 def prepare_node_with_weight_for_metis(graph, weight, wp):
     # construct the METIS input file
     UtilClass.mkdir(wp)
     metis_input = r'%s/metis.txt' % wp
     ns = graph.nodes()
     with open(metis_input, 'w') as f:
         f.write(str(len(ns)) + '\t' + str(len(graph.edges())) + '\t' + '010\t1\n')
         for node in ns:
             if node <= 0:
                 continue
             f.write(str(weight[node][ImportReaches2Mongo._NUMCELLS]) + '\t')
             for e in graph.out_edges(node):
                 if e[1] > 0:
                     f.write(str(e[1]) + '\t')
             for e in graph.in_edges(node):
                 if e[0] > 0:
                     f.write(str(e[0]) + '\t')
             f.write('\n')
     return metis_input
Code example #15
File: sd_delineation.py Project: crazyzlj/SEIMS
    def post_process_of_delineated_data(cfg):
        """Do some necessary transfer for subbasin, stream, and flow direction raster."""
        # inputs
        stream_net_file = cfg.taudems.streamnet_shp
        subbasin_file = cfg.taudems.subbsn_m
        flow_dir_file_tau = cfg.taudems.d8flow_m
        stream_raster_file = cfg.taudems.stream_m
        # outputs
        # -- shapefile
        shp_dir = cfg.dirs.geoshp
        UtilClass.mkdir(shp_dir)
        # ---- outlet, copy from DirNameUtils.TauDEM
        FileClass.copy_files(cfg.taudems.outlet_m, cfg.vecs.outlet)
        # ---- reaches
        output_reach_file = cfg.vecs.reach
        # ---- subbasins
        subbasin_vector_file = cfg.vecs.subbsn
        # -- raster file
        output_subbasin_file = cfg.spatials.subbsn
        output_flow_dir_file = cfg.spatials.d8flow
        output_stream_link_file = cfg.spatials.stream_link
        output_hillslope_file = cfg.spatials.hillslope

        id_map = StreamnetUtil.serialize_streamnet(stream_net_file, output_reach_file)
        RasterUtilClass.raster_reclassify(subbasin_file, id_map, output_subbasin_file, GDT_Int32)
        StreamnetUtil.assign_stream_id_raster(stream_raster_file, output_subbasin_file,
                                              output_stream_link_file)

        # Convert D8 encoding rule to ArcGIS
        D8Util.convert_code(flow_dir_file_tau, output_flow_dir_file)

        # convert raster to shapefile (for subbasin and basin)
        print('Generating subbasin vector...')
        VectorUtilClass.raster2shp(output_subbasin_file, subbasin_vector_file, 'subbasin',
                                   FieldNames.subbasin_id)
        mask_file = cfg.spatials.mask
        basin_vector = cfg.vecs.bsn
        print('Generating basin vector...')
        VectorUtilClass.raster2shp(mask_file, basin_vector, 'basin', FieldNames.basin)
        # delineate hillslope
        DelineateHillslope.downstream_method_whitebox(output_stream_link_file, flow_dir_file_tau,
                                                      output_hillslope_file)
Code example #16
 def prepare_node_with_weight_for_metis(graph, weight, wp):
     # construct the METIS input file
     UtilClass.mkdir(wp)
     metis_input = '%s/metis.txt' % wp
     ns = list(graph.nodes())
     ns.sort()
     with open(metis_input, 'w', encoding='utf-8') as f:
         f.write('%s\t%s\t010\t1\n' %
                 (repr(len(ns)), repr(len(graph.edges()))))
         for node in ns:
             if node <= 0:
                 continue
             tmp_line = '%s\t' % repr(
                 weight[node][ImportReaches2Mongo._NUMCELLS])
             for e in graph.out_edges(node):
                 if e[1] > 0:
                     tmp_line += '%s\t' % repr(e[1])
             for e in graph.in_edges(node):
                 if e[0] > 0:
                     tmp_line += '%s\t' % repr(e[0])
             f.write('%s\n' % tmp_line)
     return metis_input
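A sketch of the expected inputs, assuming networkx for the reach graph; the three-reach network (reaches 1 and 2 draining into reach 3) and the cell counts are hypothetical:

    import networkx as nx

    g = nx.DiGraph()
    g.add_edges_from([(1, 3), (2, 3)])  # upstream reach -> downstream reach
    # weight maps reach id -> {field: value}; _NUMCELLS holds the cell count
    weight = {1: {ImportReaches2Mongo._NUMCELLS: 120},
              2: {ImportReaches2Mongo._NUMCELLS: 80},
              3: {ImportReaches2Mongo._NUMCELLS: 200}}
    metis_in = prepare_node_with_weight_for_metis(g, weight, '/tmp/metis_ws')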
Code example #17
File: Nomenclature.py Project: jx-qqq/SEIMS
    def __init__(self, root_dir):
        """Create workspace directories for outputs.
        Args:
            root_dir: Root directory
        """
        if not os.path.isdir(root_dir):
            try:
                os.makedirs(root_dir)
            except Exception:  # failed for any reason
                root_dir = UtilClass.current_path(
                    lambda: 0) + os.sep + "FuzzySlpPos"
                os.mkdir(root_dir)
        self.root_dir = root_dir

        self.pre_dir = self.root_dir + os.sep + "PreDir"
        self.param_dir = self.root_dir + os.sep + "Params"
        self.log_dir = self.root_dir + os.sep + "Log"
        self.output_dir = self.root_dir + os.sep + "FuzzySlpPos"
        self.typloc_dir = self.root_dir + os.sep + "TypLoc"
        self.conf_dir = self.root_dir + os.sep + "Config"

        UtilClass.mkdir(self.pre_dir)
        UtilClass.mkdir(self.param_dir)
        UtilClass.mkdir(self.output_dir)
        UtilClass.mkdir(self.log_dir)
        UtilClass.mkdir(self.typloc_dir)
        UtilClass.mkdir(self.conf_dir)
Code example #18
    def watershed_delineation(np, dem, outlet_file=None, thresh=0, singlebasin=False,
                              workingdir=None, mpi_bin=None, bin_dir=None,
                              logfile=None, runtime_file=None, hostfile=None,
                              avoid_redo=False):
        """Watershed Delineation based on D8 flow direction.

        Args:
            np: process number for MPI
            dem: DEM path
            outlet_file: predefined outlet shapefile path
            thresh: predefined threshold for extracting streams from flow accumulation
            singlebasin: when set True, only extract subbasins that drain into predefined outlets
            workingdir: directory that stores outputs
            mpi_bin: directory of MPI executable binary, e.g., mpiexec, mpirun
            bin_dir: directory of TauDEM and other executable binaries
            logfile: log file path
            runtime_file: runtime file path
            hostfile: host list file path for MPI
            avoid_redo: avoid executing some functions that do not depend on input arguments
                        when repeatedly invoking this function
        """
        # 1. Check directories
        if not os.path.exists(dem):
            TauDEM.error('DEM: %s does not exist!' % dem)
        dem = os.path.abspath(dem)
        if workingdir is None or workingdir == '':
            workingdir = os.path.dirname(dem)
        nc = TauDEMFilesUtils(workingdir)  # predefined names
        workingdir = nc.workspace
        UtilClass.mkdir(workingdir)
        # 2. Check log file
        if logfile is not None and FileClass.is_file_exists(logfile):
            os.remove(logfile)
        # 3. perform calculation
        # Filling DEM
        if not (avoid_redo and FileClass.is_file_exists(nc.filldem)):
            UtilClass.writelog(logfile, '[Output] %s' % 'remove pit...', 'a')
            TauDEM.pitremove(np, dem, nc.filldem, workingdir, mpi_bin, bin_dir,
                             log_file=logfile, runtime_file=runtime_file, hostfile=hostfile)
        # Flow direction based on D8 algorithm
        if not (avoid_redo and FileClass.is_file_exists(nc.d8flow)):
            UtilClass.writelog(logfile, '[Output] %s' % 'D8 flow direction...', 'a')
            TauDEM.d8flowdir(np, nc.filldem, nc.d8flow, nc.slp, workingdir,
                             mpi_bin, bin_dir, log_file=logfile,
                             runtime_file=runtime_file, hostfile=hostfile)
        # Flow accumulation without stream skeleton as weight
        if not (avoid_redo and FileClass.is_file_exists(nc.d8acc)):
            UtilClass.writelog(logfile, '[Output] %s' % 'D8 flow accumulation...', 'a')
            TauDEM.aread8(np, nc.d8flow, nc.d8acc, None, None, False, workingdir, mpi_bin, bin_dir,
                          log_file=logfile, runtime_file=runtime_file, hostfile=hostfile)
        # Initial stream network using mean accumulation as threshold
        UtilClass.writelog(logfile, '[Output] %s' % 'Generating stream raster initially...', 'a')
        min_accum, max_accum, mean_accum, std_accum = RasterUtilClass.raster_statistics(nc.d8acc)
        TauDEM.threshold(np, nc.d8acc, nc.stream_raster, mean_accum, workingdir,
                         mpi_bin, bin_dir, log_file=logfile,
                         runtime_file=runtime_file, hostfile=hostfile)
        # Outlets position initialization and adjustment
        UtilClass.writelog(logfile, '[Output] %s' % 'Moving outlet to stream...', 'a')
        if outlet_file is None:  # if not given, take cell with maximum accumulation as outlet
            outlet_file = nc.outlet_pre
            TauDEM.connectdown(np, nc.d8flow, nc.d8acc, outlet_file, nc.outlet_m, wtsd=None,
                               workingdir=workingdir, mpiexedir=mpi_bin, exedir=bin_dir,
                               log_file=logfile, runtime_file=runtime_file, hostfile=hostfile)
        TauDEM.moveoutletstostrm(np, nc.d8flow, nc.stream_raster, outlet_file,
                                 nc.outlet_m, workingdir=workingdir,
                                 mpiexedir=mpi_bin, exedir=bin_dir,
                                 log_file=logfile, runtime_file=runtime_file, hostfile=hostfile)
        # Stream skeleton by the Peuker-Douglas algorithm
        UtilClass.writelog(logfile, '[Output] %s' % 'Generating stream skeleton ...', 'a')
        TauDEM.peukerdouglas(np, nc.filldem, nc.stream_pd, workingdir,
                             mpi_bin, bin_dir, log_file=logfile,
                             runtime_file=runtime_file, hostfile=hostfile)
        # Weighted flow accumulation with outlet
        UtilClass.writelog(logfile, '[Output] %s' % 'Flow accumulation with outlet...', 'a')
        tmp_outlet = None
        if singlebasin:
            tmp_outlet = nc.outlet_m
        TauDEM.aread8(np, nc.d8flow, nc.d8acc_weight, tmp_outlet, nc.stream_pd, False,
                      workingdir, mpi_bin, bin_dir, log_file=logfile,
                      runtime_file=runtime_file, hostfile=hostfile)
        # Determine threshold by input argument or dropanalysis function
        if thresh <= 0:  # find the optimal threshold using dropanalysis function
            UtilClass.writelog(logfile, '[Output] %s' %
                               'Drop analysis to select optimal threshold...', 'a')
            min_accum, max_accum, mean_accum, std_accum = \
                RasterUtilClass.raster_statistics(nc.d8acc_weight)
            if mean_accum - std_accum < 0:
                minthresh = mean_accum
            else:
                minthresh = mean_accum - std_accum
            maxthresh = mean_accum + std_accum
            TauDEM.dropanalysis(np, nc.filldem, nc.d8flow, nc.d8acc_weight,
                                nc.d8acc_weight, nc.outlet_m, minthresh, maxthresh,
                                20, 'true', nc.drptxt, workingdir, mpi_bin, bin_dir,
                                log_file=logfile, runtime_file=runtime_file, hostfile=hostfile)
            if not FileClass.is_file_exists(nc.drptxt):
                # raise RuntimeError('Dropanalysis failed and drp.txt was not created!')
                UtilClass.writelog(logfile, '[Output] %s' %
                                   'dropanalysis failed!', 'a')
                thresh = 0.5 * (maxthresh - minthresh) + minthresh
            else:
                with open(nc.drptxt, 'r', encoding='utf-8') as drpf:
                    temp_contents = drpf.read()
                    (beg, thresh) = temp_contents.rsplit(' ', 1)
            thresh = float(thresh)
            UtilClass.writelog(logfile, '[Output] %s: %f' %
                               ('Selected optimal threshold', thresh), 'a')
        # Final stream network
        UtilClass.writelog(logfile, '[Output] %s' % 'Generating stream raster...', 'a')
        TauDEM.threshold(np, nc.d8acc_weight, nc.stream_raster, thresh,
                         workingdir, mpi_bin, bin_dir, log_file=logfile,
                         runtime_file=runtime_file, hostfile=hostfile)
        UtilClass.writelog(logfile, '[Output] %s' % 'Generating stream net...', 'a')
        TauDEM.streamnet(np, nc.filldem, nc.d8flow, nc.d8acc_weight, nc.stream_raster,
                         nc.outlet_m, nc.stream_order, nc.channel_net,
                         nc.channel_coord, nc.streamnet_shp, nc.subbsn,
                         workingdir, mpi_bin, bin_dir,
                         log_file=logfile, runtime_file=runtime_file, hostfile=hostfile)
        # Serialize IDs of subbasins and the corresponding streams
        UtilClass.writelog(logfile, '[Output] %s' % 'Serialize subbasin&stream IDs...', 'a')
        id_map = StreamnetUtil.serialize_streamnet(nc.streamnet_shp, nc.streamnet_m)
        RasterUtilClass.raster_reclassify(nc.subbsn, id_map, nc.subbsn_m, GDT_Int32)
        StreamnetUtil.assign_stream_id_raster(nc.stream_raster, nc.subbsn_m, nc.stream_m)
        # convert raster to shapefile (for subbasin and basin)
        UtilClass.writelog(logfile, '[Output] %s' % 'Generating subbasin vector...', 'a')
        VectorUtilClass.raster2shp(nc.subbsn_m, nc.subbsn_shp, 'subbasin', 'SUBBASINID')
        # Finish the workflow
        UtilClass.writelog(logfile, '[Output] %s' %
                           'Original subbasin delineation is finished!', 'a')
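An invocation sketch for this workflow, assuming the method is exposed as TauDEMWorkflow.watershed_delineation as in the original_delineation examples above; every path is a placeholder, and thresh=0 triggers the drop-analysis branch:

    TauDEMWorkflow.watershed_delineation(4, '/data/demo/dem.tif',
                                         outlet_file='/data/demo/outlet.shp',
                                         thresh=0, singlebasin=True,
                                         workingdir='/data/demo/taudem',
                                         bin_dir='/opt/taudem/bin',
                                         logfile='/data/demo/delineation.log')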
Code example #19
File: TauDEM.py Project: crazyzlj/PyGeoC
    def watershed_delineation(np, dem, outlet_file=None, thresh=0, singlebasin=False,
                              workingdir=None, mpi_bin=None, bin_dir=None,
                              logfile=None, runtime_file=None, hostfile=None):
        """Watershed Delineation."""
        # 1. Check directories
        if not os.path.exists(dem):
            TauDEM.error('DEM: %s does not exist!' % dem)
        dem = os.path.abspath(dem)
        if workingdir is None:
            workingdir = os.path.dirname(dem)
        namecfg = TauDEMFilesUtils(workingdir)
        workingdir = namecfg.workspace
        UtilClass.mkdir(workingdir)
        # 2. Check log file
        if logfile is not None and FileClass.is_file_exists(logfile):
            os.remove(logfile)
        # 3. Get predefined intermediate file names
        filled_dem = namecfg.filldem
        flow_dir = namecfg.d8flow
        slope = namecfg.slp
        flow_dir_dinf = namecfg.dinf
        slope_dinf = namecfg.dinf_slp
        dir_code_dinf = namecfg.dinf_d8dir
        weight_dinf = namecfg.dinf_weight
        acc = namecfg.d8acc
        stream_raster = namecfg.stream_raster
        default_outlet = namecfg.outlet_pre
        modified_outlet = namecfg.outlet_m
        stream_skeleton = namecfg.stream_pd
        acc_with_weight = namecfg.d8acc_weight
        stream_order = namecfg.stream_order
        ch_network = namecfg.channel_net
        ch_coord = namecfg.channel_coord
        stream_net = namecfg.streamnet_shp
        subbasin = namecfg.subbsn
        dist2_stream_d8 = namecfg.dist2stream_d8

        # 4. perform calculation
        UtilClass.writelog(logfile, '[Output] %d..., %s' % (10, 'pitremove DEM...'), 'a')
        TauDEM.pitremove(np, dem, filled_dem, workingdir, mpi_bin, bin_dir,
                         log_file=logfile, runtime_file=runtime_file, hostfile=hostfile)
        UtilClass.writelog(logfile, '[Output] %d..., %s' %
                           (20, 'Calculating D8 and Dinf flow direction...'), 'a')
        TauDEM.d8flowdir(np, filled_dem, flow_dir, slope, workingdir,
                         mpi_bin, bin_dir, log_file=logfile,
                         runtime_file=runtime_file, hostfile=hostfile)
        TauDEM.dinfflowdir(np, filled_dem, flow_dir_dinf, slope_dinf, workingdir,
                           mpi_bin, bin_dir, log_file=logfile,
                           runtime_file=runtime_file, hostfile=hostfile)
        DinfUtil.output_compressed_dinf(flow_dir_dinf, dir_code_dinf, weight_dinf)
        UtilClass.writelog(logfile, '[Output] %d..., %s' % (30, 'D8 flow accumulation...'), 'a')
        TauDEM.aread8(np, flow_dir, acc, None, None, False, workingdir, mpi_bin, bin_dir,
                      log_file=logfile, runtime_file=runtime_file, hostfile=hostfile)
        UtilClass.writelog(logfile, '[Output] %d..., %s' %
                           (40, 'Generating stream raster initially...'), 'a')
        min_accum, max_accum, mean_accum, std_accum = RasterUtilClass.raster_statistics(acc)
        TauDEM.threshold(np, acc, stream_raster, mean_accum, workingdir,
                         mpi_bin, bin_dir, log_file=logfile,
                         runtime_file=runtime_file, hostfile=hostfile)
        UtilClass.writelog(logfile, '[Output] %d..., %s' % (50, 'Moving outlet to stream...'), 'a')
        if outlet_file is None:
            outlet_file = default_outlet
            TauDEM.connectdown(np, flow_dir, acc, outlet_file, wtsd=None,
                               workingdir=workingdir, mpiexedir=mpi_bin, exedir=bin_dir,
                               log_file=logfile, runtime_file=runtime_file, hostfile=hostfile)
        TauDEM.moveoutletstostrm(np, flow_dir, stream_raster, outlet_file,
                                 modified_outlet, workingdir, mpi_bin, bin_dir,
                                 log_file=logfile, runtime_file=runtime_file, hostfile=hostfile)
        UtilClass.writelog(logfile, '[Output] %d..., %s' %
                           (60, 'Generating stream skeleton...'), 'a')
        TauDEM.peukerdouglas(np, filled_dem, stream_skeleton, workingdir,
                             mpi_bin, bin_dir, log_file=logfile,
                             runtime_file=runtime_file, hostfile=hostfile)
        UtilClass.writelog(logfile, '[Output] %d..., %s' %
                           (70, 'Flow accumulation with outlet...'), 'a')
        tmp_outlet = None
        if singlebasin:
            tmp_outlet = modified_outlet
        TauDEM.aread8(np, flow_dir, acc_with_weight, tmp_outlet, stream_skeleton, False,
                      workingdir, mpi_bin, bin_dir, log_file=logfile,
                      runtime_file=runtime_file, hostfile=hostfile)

        if thresh <= 0:  # find the optimal threshold using dropanalysis function
            UtilClass.writelog(logfile, '[Output] %d..., %s' %
                               (75, 'Drop analysis to select optimal threshold...'), 'a')
            min_accum, max_accum, mean_accum, std_accum = \
                RasterUtilClass.raster_statistics(acc_with_weight)
            if mean_accum - std_accum < 0:
                minthresh = mean_accum
            else:
                minthresh = mean_accum - std_accum
            maxthresh = mean_accum + std_accum
            numthresh = 20
            logspace = 'true'
            drp_file = namecfg.drptxt
            TauDEM.dropanalysis(np, filled_dem, flow_dir, acc_with_weight,
                                acc_with_weight, modified_outlet, minthresh, maxthresh,
                                numthresh, logspace, drp_file, workingdir, mpi_bin, bin_dir,
                                log_file=logfile, runtime_file=runtime_file, hostfile=hostfile)
            if not FileClass.is_file_exists(drp_file):
                raise RuntimeError('Dropanalysis failed and drp.txt was not created!')
            with open(drp_file, 'r', encoding='utf-8') as drpf:
                temp_contents = drpf.read()
                (beg, thresh) = temp_contents.rsplit(' ', 1)
            print(thresh)
        UtilClass.writelog(logfile, '[Output] %d..., %s' % (80, 'Generating stream raster...'), 'a')
        TauDEM.threshold(np, acc_with_weight, stream_raster, float(thresh),
                         workingdir, mpi_bin, bin_dir, log_file=logfile,
                         runtime_file=runtime_file, hostfile=hostfile)
        UtilClass.writelog(logfile, '[Output] %d..., %s' % (90, 'Generating stream net...'), 'a')
        TauDEM.streamnet(np, filled_dem, flow_dir, acc_with_weight, stream_raster,
                         modified_outlet, stream_order, ch_network,
                         ch_coord, stream_net, subbasin, workingdir, mpi_bin, bin_dir,
                         log_file=logfile, runtime_file=runtime_file, hostfile=hostfile)
        UtilClass.writelog(logfile, '[Output] %d..., %s' %
                           (95, 'Calculating distance to stream (D8)...'), 'a')
        TauDEM.d8hdisttostrm(np, flow_dir, stream_raster, dist2_stream_d8, 1,
                             workingdir, mpi_bin, bin_dir, log_file=logfile,
                             runtime_file=runtime_file, hostfile=hostfile)
        UtilClass.writelog(logfile, '[Output] %d..., %s' %
                           (100, 'Original subbasin delineation is finished!'), 'a')
Code example #20
File: TauDEM.py Project: crazyzlj/PyGeoC
    def run(function_name, in_files, wp=None, in_params=None, out_files=None, mpi_params=None,
            log_params=None):
        """
        Run TauDEM function.

         - 1. The command will not execute if any input file does not exist.
         - 2. An error will be detected after running the TauDEM command if
              any output file does not exist;

        Args:
            function_name (str): Full path of TauDEM function.
            in_files (dict, required): Dict of pairs of parameter id (string) and file path
                (string or list) for input files, e.g.::

                    {'-z': '/full/path/to/dem.tif'}

            wp (str, optional): Workspace for outputs. If not specified, the directory of the
                first input file in ``in_files`` will be used.
            in_params (dict, optional): Dict of pairs of parameter id (string) and value
                (or None for a flag parameter without a value) for input parameters, e.g.::

                    {'-nc': None}
                    {'-thresh': threshold}
                    {'-m': 'ave' 's', '-nc': None}

            out_files (dict, optional): Dict of pairs of parameter id (string) and file
                path (string or list) for output files, e.g.::

                    {'-fel': 'filleddem.tif'}
                    {'-maxS': ['harden.tif', 'maxsimi.tif']}

            mpi_params (dict, optional): Dict of pairs of parameter id (string) and value or
                path for MPI setting, e.g.::

                    {'mpipath':'/soft/bin','hostfile':'/soft/bin/cluster.node','n':4}
                    {'mpipath':'/soft/bin', 'n':4}
                    {'n':4}

            log_params (dict, optional): Dict of pairs of parameter id (string) and value or
                path for runtime and log output parameters. e.g.::

                    {'logfile': '/home/user/log.txt',
                     'runtimefile': '/home/user/runtime.txt'}

        Returns:
            True if TauDEM ran successfully, otherwise False.
        """
        # Check input files
        if in_files is None:
            TauDEM.error('Input files parameter is required!')
        if not isinstance(in_files, dict):
            TauDEM.error('The input files parameter must be a dict!')
        for (pid, infile) in iteritems(in_files):
            if infile is None:
                continue
            if isinstance(infile, list) or isinstance(infile, tuple):
                for idx, inf in enumerate(infile):
                    if inf is None:
                        continue
                    inf, wp = TauDEM.check_infile_and_wp(inf, wp)
                    in_files[pid][idx] = inf
                continue
            if os.path.exists(infile):
                infile, wp = TauDEM.check_infile_and_wp(infile, wp)
                in_files[pid] = os.path.abspath(infile)
            else:
                # For more flexible input file specifications,
                # e.g., -inputtags 1 <path/to/tag1.tif> 2 <path/to/tag2.tif> ...
                # In such unpredictable circumstances we cannot check the existence
                # of input files here, so the caller must check them elsewhere.
                if len(StringClass.split_string(infile, ' ')) > 1:
                    continue
                else:  # the infile should still be an existing file, so check in the workspace
                    if wp is None:
                        TauDEM.error('Workspace should not be None!')
                    infile = wp + os.sep + infile
                    if not os.path.exists(infile):
                        TauDEM.error('Input files parameter %s: %s does not exist!' %
                                     (pid, infile))
                    in_files[pid] = os.path.abspath(infile)
        # Make the workspace dir if it does not exist
        UtilClass.mkdir(wp)
        # Check the log parameter
        log_file = None
        runtime_file = None
        if log_params is not None:
            if not isinstance(log_params, dict):
                TauDEM.error('The log parameter must be a dict!')
            if 'logfile' in log_params and log_params['logfile'] is not None:
                log_file = log_params['logfile']
                # If log_file is just a file name, then save it in the default workspace.
                if os.sep not in log_file:
                    log_file = wp + os.sep + log_file
                    log_file = os.path.abspath(log_file)
            if 'runtimefile' in log_params and log_params['runtimefile'] is not None:
                runtime_file = log_params['runtimefile']
                # If runtime_file is just a file name, then save it in the default workspace.
                if os.sep not in runtime_file:
                    runtime_file = wp + os.sep + runtime_file
                    runtime_file = os.path.abspath(runtime_file)

        # remove out_files to avoid any file IO related error
        new_out_files = list()
        if out_files is not None:
            if not isinstance(out_files, dict):
                TauDEM.error('The output files parameter must be a dict!')
            for (pid, out_file) in iteritems(out_files):
                if out_file is None:
                    continue
                if isinstance(out_file, list) or isinstance(out_file, tuple):
                    for idx, outf in enumerate(out_file):
                        if outf is None:
                            continue
                        outf = FileClass.get_file_fullpath(outf, wp)
                        FileClass.remove_files(outf)
                        out_files[pid][idx] = outf
                        new_out_files.append(outf)
                else:
                    out_file = FileClass.get_file_fullpath(out_file, wp)
                    FileClass.remove_files(out_file)
                    out_files[pid] = out_file
                    new_out_files.append(out_file)

        # concatenate command line
        commands = list()
        # MPI header
        if mpi_params is not None:
            if not isinstance(mpi_params, dict):
                TauDEM.error('The MPI settings parameter must be a dict!')
            if 'mpipath' in mpi_params and mpi_params['mpipath'] is not None:
                commands.append(mpi_params['mpipath'] + os.sep + 'mpiexec')
            else:
                commands.append('mpiexec')
            if 'hostfile' in mpi_params and mpi_params['hostfile'] is not None \
                    and not StringClass.string_match(mpi_params['hostfile'], 'none') \
                    and os.path.isfile(mpi_params['hostfile']):
                commands.append('-f')
                commands.append(mpi_params['hostfile'])
            if 'n' in mpi_params and mpi_params['n'] > 1:
                commands.append('-n')
                commands.append(str(mpi_params['n']))
            else:  # If the number of processes is 1 or less, do not call mpiexec.
                commands = []
        # append TauDEM function name, which can be full path or just one name
        commands.append(function_name)
        # append input files
        for (pid, infile) in iteritems(in_files):
            if infile is None:
                continue
            if pid[0] != '-':
                pid = '-' + pid
            commands.append(pid)
            if isinstance(infile, list) or isinstance(infile, tuple):
                commands.append(' '.join(tmpf for tmpf in infile))
            else:
                commands.append(infile)
        # append input parameters
        if in_params is not None:
            if not isinstance(in_params, dict):
                TauDEM.error('The input parameters must be a dict!')
            for (pid, v) in iteritems(in_params):
                if pid[0] != '-':
                    pid = '-' + pid
                commands.append(pid)
                # allow a parameter that is a flag without a value
                if v != '' and v is not None:
                    if MathClass.isnumerical(v):
                        commands.append(str(v))
                    else:
                        commands.append(v)
        # append output parameters
        if out_files is not None:
            for (pid, outfile) in iteritems(out_files):
                if outfile is None:
                    continue
                if pid[0] != '-':
                    pid = '-' + pid
                commands.append(pid)
                if isinstance(outfile, list) or isinstance(outfile, tuple):
                    commands.append(' '.join(tmpf for tmpf in outfile))
                else:
                    commands.append(outfile)
        # run command
        runmsg = UtilClass.run_command(commands)
        TauDEM.log(runmsg, log_file)
        TauDEM.output_runtime_to_log(function_name, runmsg, runtime_file)
        # Check out_files, raise RuntimeError if not exist.
        for of in new_out_files:
            if not os.path.exists(of):
                TauDEM.error('%s failed, and the %s was not generated!' % (function_name, of))
                return False
        return True
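A call sketch derived from the docstring above, running TauDEM's pitremove; all paths are placeholders:

    ok = TauDEM.run('pitremove',
                    {'-z': '/full/path/to/dem.tif'},
                    wp='/full/path/to/workspace',
                    out_files={'-fel': 'filleddem.tif'},
                    mpi_params={'n': 4},
                    log_params={'logfile': 'taudem.log'})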
Code example #21
File: config.py Project: jx-qqq/SEIMS
    def __init__(self, cf):
        """Initialization."""
        # 1. Directories
        self.base_dir = None
        self.clim_dir = None
        self.spatial_dir = None
        self.observe_dir = None
        self.scenario_dir = None
        self.model_dir = None
        self.txt_db_dir = None
        self.preproc_script_dir = None
        self.seims_bin = None
        self.mpi_bin = None
        self.workspace = None
        # 1.1. Directory determined flags
        self.use_observed = True
        self.use_scernario = True
        # 2. MongoDB configuration and database, collation, GridFS names
        self.hostname = '127.0.0.1'  # localhost by default
        self.port = 27017
        self.climate_db = ''
        self.bmp_scenario_db = ''
        self.spatial_db = ''
        # 3. Climate inputs
        self.hydro_climate_vars = None
        self.prec_sites = None
        self.prec_data = None
        self.Meteo_sites = None
        self.Meteo_data = None
        self.thiessen_field = 'ID'
        # 4. Spatial inputs
        self.prec_sites_thiessen = None
        self.meteo_sites_thiessen = None
        self.dem = None
        self.outlet_file = None
        self.landuse = None
        self.landcover_init_param = None
        self.soil = None
        self.soil_property = None
        self.fields_partition = False
        self.fields_partition_thresh = list()
        self.additional_rs = dict()
        # 5. Option parameters
        self.d8acc_threshold = 0
        self.np = 4
        self.d8down_method = 's'
        self.dorm_hr = -1.
        self.temp_base = 0.
        self.imper_perc_in_urban = 0.
        self.default_landuse = -1
        self.default_soil = -1
        # 1. Directories
        if 'PATH' in cf.sections():
            self.base_dir = cf.get('PATH', 'base_data_dir')
            self.clim_dir = cf.get('PATH', 'climate_data_dir')
            self.spatial_dir = cf.get('PATH', 'spatial_data_dir')
            self.observe_dir = cf.get('PATH', 'measurement_data_dir')
            self.scenario_dir = cf.get('PATH', 'bmp_data_dir')
            self.model_dir = cf.get('PATH', 'model_dir')
            self.txt_db_dir = cf.get('PATH', 'txt_db_dir')
            self.preproc_script_dir = cf.get('PATH', 'preproc_script_dir')
            self.seims_bin = cf.get('PATH', 'cpp_program_dir')
            self.mpi_bin = cf.get('PATH', 'mpiexec_dir')
            self.workspace = cf.get('PATH', 'working_dir')
        else:
            raise ValueError('[PATH] section MUST exist in the *.ini file.')
        if not (FileClass.is_dir_exists(self.base_dir)
                and FileClass.is_dir_exists(self.model_dir)
                and FileClass.is_dir_exists(self.txt_db_dir)
                and FileClass.is_dir_exists(self.preproc_script_dir)
                and FileClass.is_dir_exists(self.seims_bin)):
            raise IOError(
                'Please check the directories defined in [PATH]. '
                'BASE_DATA_DIR, MODEL_DIR, TXT_DB_DIR, PREPROC_SCRIPT_DIR, '
                'and CPP_PROGRAM_DIR are required!')
        if not FileClass.is_dir_exists(self.mpi_bin):
            self.mpi_bin = None
        if not FileClass.is_dir_exists(self.workspace):
            try:  # first try to make dirs
                UtilClass.mkdir(self.workspace)
                # os.mkdir(self.workspace)
            except OSError as exc:
                self.workspace = self.model_dir + os.path.sep + 'preprocess_output'
                print('WARNING: Make WORKING_DIR failed: %s. Use the default: %s' %
                      (str(exc), self.workspace))
                if not os.path.exists(self.workspace):
                    UtilClass.mkdir(self.workspace)

        self.dirs = DirNameUtils(self.workspace)
        self.logs = LogNameUtils(self.dirs.log)
        self.vecs = VectorNameUtils(self.dirs.geoshp)
        self.taudems = TauDEMFilesUtils(self.dirs.taudem)
        self.spatials = SpatialNamesUtils(self.dirs.geodata2db)
        self.modelcfgs = ModelCfgUtils(self.model_dir)
        self.paramcfgs = ModelParamDataUtils(self.preproc_script_dir +
                                             os.path.sep + 'database')

        if not FileClass.is_dir_exists(self.clim_dir):
            print(
                'The CLIMATE_DATA_DIR does not exist; trying the default folder name "climate".'
            )
            self.clim_dir = self.base_dir + os.path.sep + 'climate'
            if not FileClass.is_dir_exists(self.clim_dir):
                raise IOError(
                    'A directory named "climate" MUST be located in [base_dir]!'
                )

        if not FileClass.is_dir_exists(self.spatial_dir):
            print(
                'The SPATIAL_DATA_DIR does not exist; trying the default folder name "spatial".'
            )
            self.spatial_dir = self.base_dir + os.path.sep + 'spatial'
            if not FileClass.is_dir_exists(self.spatial_dir):
                raise IOError(
                    'A directory named "spatial" MUST be located in [base_dir]!')

        if not FileClass.is_dir_exists(self.observe_dir):
            self.observe_dir = None
            self.use_observed = False

        if not FileClass.is_dir_exists(self.scenario_dir):
            self.scenario_dir = None
            self.use_scernario = False

        # 2. MongoDB related
        if 'MONGODB' in cf.sections():
            self.hostname = cf.get('MONGODB', 'hostname')
            self.port = cf.getint('MONGODB', 'port')
            self.climate_db = cf.get('MONGODB', 'climatedbname')
            self.bmp_scenario_db = cf.get('MONGODB', 'bmpscenariodbname')
            self.spatial_db = cf.get('MONGODB', 'spatialdbname')
        else:
            raise ValueError(
                '[MONGODB] section MUST exist in the *.ini file.')
        if not StringClass.is_valid_ip_addr(self.hostname):
            raise ValueError('Illegal HOSTNAME defined in [MONGODB]!')

        # 3. Climate Input
        if 'CLIMATE' in cf.sections():
            self.hydro_climate_vars = self.clim_dir + os.path.sep + cf.get(
                'CLIMATE', 'hydroclimatevarfile')
            self.prec_sites = self.clim_dir + os.path.sep + cf.get(
                'CLIMATE', 'precsitefile')
            self.prec_data = self.clim_dir + os.path.sep + cf.get(
                'CLIMATE', 'precdatafile')
            self.Meteo_sites = self.clim_dir + os.path.sep + cf.get(
                'CLIMATE', 'meteositefile')
            self.Meteo_data = self.clim_dir + os.path.sep + cf.get(
                'CLIMATE', 'meteodatafile')
            self.thiessen_field = cf.get('CLIMATE', 'thiessenidfield')
        else:
            raise ValueError(
                'Climate input file names MUST be provided in [CLIMATE]!')

        # 4. Spatial Input
        if 'SPATIAL' in cf.sections():
            self.prec_sites_thiessen = self.spatial_dir + os.path.sep + cf.get(
                'SPATIAL', 'precsitesthiessen')
            self.meteo_sites_thiessen = self.spatial_dir + os.path.sep + cf.get(
                'SPATIAL', 'meteositesthiessen')
            self.dem = self.spatial_dir + os.path.sep + cf.get(
                'SPATIAL', 'dem')
            self.outlet_file = self.spatial_dir + os.path.sep + cf.get(
                'SPATIAL', 'outlet_file')
            if not os.path.exists(self.outlet_file):
                self.outlet_file = None
            self.landuse = self.spatial_dir + os.path.sep + cf.get(
                'SPATIAL', 'landusefile')
            self.landcover_init_param = self.txt_db_dir + os.path.sep + cf.get(
                'SPATIAL', 'landcoverinitfile')
            self.soil = self.spatial_dir + os.path.sep + cf.get(
                'SPATIAL', 'soilseqnfile')
            self.soil_property = self.txt_db_dir + os.path.sep + cf.get(
                'SPATIAL', 'soilseqntext')
            if cf.has_option('SPATIAL', 'additionalfile'):
                additional_dict_str = cf.get('SPATIAL', 'additionalfile')
                tmpdict = json.loads(additional_dict_str)
                tmpdict = {
                    str(k): (str(v) if is_string(v) else v)
                    for k, v in list(tmpdict.items())
                }
                for k, v in list(tmpdict.items()):
                    # Existence check has been moved to mask_origin_delineated_data()
                    #  in sp_delineation.py
                    self.additional_rs[k] = v
            # Field partition
            if cf.has_option('SPATIAL', 'field_partition_thresh'):
                ths = cf.get('SPATIAL', 'field_partition_thresh')
                thsv = StringClass.extract_numeric_values_from_string(ths)
                if thsv is not None:
                    self.fields_partition_thresh = [int(v) for v in thsv]
                    self.fields_partition = True
        else:
            raise ValueError(
                'Spatial input file names MUST be provided in [SPATIAL]!')

        # 5. Optional parameters
        if 'OPTIONAL_PARAMETERS' in cf.sections():
            self.d8acc_threshold = cf.getfloat('OPTIONAL_PARAMETERS',
                                               'd8accthreshold')
            self.np = cf.getint('OPTIONAL_PARAMETERS', 'np')
            self.d8down_method = cf.get('OPTIONAL_PARAMETERS', 'd8downmethod')
            if StringClass.string_match(self.d8down_method, 'surface'):
                self.d8down_method = 's'
            elif StringClass.string_match(self.d8down_method, 'horizontal'):
                self.d8down_method = 'h'
            elif StringClass.string_match(self.d8down_method, 'pythagoras'):
                self.d8down_method = 'p'
            elif StringClass.string_match(self.d8down_method, 'vertical'):
                self.d8down_method = 'v'
            else:
                self.d8down_method = self.d8down_method.lower()
                if self.d8down_method not in ['s', 'h', 'p', 'v']:
                    self.d8down_method = 's'
            self.dorm_hr = cf.getfloat('OPTIONAL_PARAMETERS', 'dorm_hr')
            self.temp_base = cf.getfloat('OPTIONAL_PARAMETERS', 't_base')
            self.imper_perc_in_urban = cf.getfloat(
                'OPTIONAL_PARAMETERS', 'imperviouspercinurbancell')
            self.default_landuse = cf.getint('OPTIONAL_PARAMETERS',
                                             'defaultlanduse')
            self.default_soil = cf.getint('OPTIONAL_PARAMETERS', 'defaultsoil')
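A minimal loading sketch for the configuration class above. Since the excerpt shows only __init__, the class name PreprocessConfig and the ini path are assumptions for illustration:

from configparser import ConfigParser

cf = ConfigParser()
cf.read('/path/to/preprocess.ini')  # assumed ini location
cfg = PreprocessConfig(cf)          # assumed class name
print(cfg.workspace, cfg.spatial_db)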
Code example #28
    def metis_partition(g, weight, wp, bin_dir):
        """Partition subbasins into multiple groups by METIS

        Args:
            g: `NetworkX.DiGraph` object
            weight: weight of each node, e.g., area of subbasin, {subbasinID: weight}
            wp: output directory
            bin_dir: directory of METIS package
        Returns:
            group_dict: {subbasinID: {'group': group_number_list,
                                      'kmetis': group_ids_list_by_kmetis,
                                      'pmetis': group_ids_list_by_pmetis}
                        }
        """
        group_dict = dict()
        for subbsn_id in g.nodes():
            group_dict[subbsn_id] = {
                'group': list(),
                'kmetis': list(),
                'pmetis': list()
            }

        metis_input = ImportReaches2Mongo.prepare_node_with_weight_for_metis(
            g, weight, wp)
        # Create the candidate numbers of partitions
        nlist = list(range(1, 129))
        nlist.extend([192, 256, 384, 512, 768, 1536])
        # Values in nlist must not exceed the number of subbasins; larger values are meaningless.
        ns = g.nodes()
        nlist = [x for x in nlist if x <= max(ns)]
        # Make directories for KMETIS and PMETIS
        UtilClass.mkdir(wp + os.path.sep + 'kmetis')
        UtilClass.mkdir(wp + os.path.sep + 'pmetis')
        for n in nlist:
            print('divide number: %d' % n)
            if n <= 1:
                for subbsn_id in g.nodes():
                    group_dict[subbsn_id]['group'].append(1)
                    group_dict[subbsn_id]['kmetis'].append(0)
                    group_dict[subbsn_id]['pmetis'].append(0)
                continue
            # kmetis, -ptype=kway, direct k-way partitioning (default)
            str_command = '"%s/gpmetis" %s %d' % (bin_dir, metis_input, n)
            result = UtilClass.run_command(str_command)
            with open('%s/kmetis/kmetisResult%d.txt' % (wp, n),
                      'w') as f_metis_output:
                for line in result:
                    f_metis_output.write(line)
            metis_output = '%s.part.%d' % (metis_input, n)
            with open(metis_output, 'r') as f:
                lines = f.readlines()
            group_kmetis = [int(item) for item in lines]
            adjust_group_result(weight, group_kmetis, n)
            shutil.move(metis_output, '%s/kmetis/metis.part.%d' % (wp, n))

            # pmetis, -ptype=rb, recursive bisectioning
            str_command = '"%s/gpmetis" -ptype=rb %s %d' % (bin_dir,
                                                            metis_input, n)
            result = UtilClass.run_command(str_command)
            with open('%s/pmetis/pmetisResult%d.txt' % (wp, n),
                      'w') as f_metis_output:
                for line in result:
                    f_metis_output.write(line)
            with open(metis_output, 'r') as f:
                lines = f.readlines()
            group_pmetis = [int(item) for item in lines]
            adjust_group_result(weight, group_pmetis, n)
            shutil.move(metis_output, '%s/pmetis/metis.part.%d' % (wp, n))

            for i, (gk, gp) in enumerate(zip(group_kmetis, group_pmetis)):
                group_dict[i + 1]['group'].append(n)
                group_dict[i + 1]['kmetis'].append(gk)
                group_dict[i + 1]['pmetis'].append(gp)
        return group_dict
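An illustrative call on a tiny reach network. It assumes metis_partition is callable as shown (it is defined method-style in the source) and that the gpmetis binary and the ImportReaches2Mongo helper are available:

import networkx as nx

g = nx.DiGraph()
g.add_edges_from([(2, 1), (3, 1)])  # subbasins 2 and 3 drain to outlet subbasin 1
weight = {1: 10.0, 2: 6.0, 3: 4.0}  # e.g., subbasin areas
groups = metis_partition(g, weight, '/path/to/workspace', '/path/to/metis/bin')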
Code example #29
File: config.py  Project: crazyzlj/SEIMS
    def __init__(self, cf):
        """Initialization."""
        # 1. Directories
        self.base_dir = None
        self.clim_dir = None
        self.spatial_dir = None
        self.observe_dir = None
        self.scenario_dir = None
        self.model_dir = None
        self.txt_db_dir = None
        self.preproc_script_dir = None
        self.seims_bin = None
        self.mpi_bin = None
        self.workspace = None
        # 1.1. Directory determined flags
        self.use_observed = True
        self.use_scernario = True
        # 2. MongoDB configuration and database, collation, GridFS names
        self.hostname = '127.0.0.1'  # localhost by default
        self.port = 27017
        self.climate_db = ''
        self.bmp_scenario_db = ''
        self.spatial_db = ''
        # 3. Switch for building SEIMS. These switches should be removed! By lj.
        # self.gen_cn = True
        # self.gen_runoff_coef = True
        # self.gen_crop = True
        # self.gen_iuh = True
        # 4. Climate inputs
        self.hydro_climate_vars = None
        self.prec_sites = None
        self.prec_data = None
        self.Meteo_sites = None
        self.Meteo_data = None
        self.thiessen_field = 'ID'
        # 5. Spatial inputs
        self.prec_sites_thiessen = None
        self.meteo_sites_thiessen = None
        self.dem = None
        self.outlet_file = None
        self.landuse = None
        self.landcover_init_param = None
        self.soil = None
        self.soil_property = None
        self.fields_partition = False
        self.fields_partition_thresh = list()
        self.additional_rs = dict()
        # 6. Option parameters
        self.d8acc_threshold = 0
        self.np = 4
        self.d8down_method = 's'
        self.dorm_hr = -1.
        self.temp_base = 0.
        self.imper_perc_in_urban = 0.
        self.default_landuse = -1
        self.default_soil = -1
        # 1. Directories
        if 'PATH' in cf.sections():
            self.base_dir = cf.get('PATH', 'base_data_dir')
            self.clim_dir = cf.get('PATH', 'climate_data_dir')
            self.spatial_dir = cf.get('PATH', 'spatial_data_dir')
            self.observe_dir = cf.get('PATH', 'measurement_data_dir')
            self.scenario_dir = cf.get('PATH', 'bmp_data_dir')
            self.model_dir = cf.get('PATH', 'model_dir')
            self.txt_db_dir = cf.get('PATH', 'txt_db_dir')
            self.preproc_script_dir = cf.get('PATH', 'preproc_script_dir')
            self.seims_bin = cf.get('PATH', 'cpp_program_dir')
            self.mpi_bin = cf.get('PATH', 'mpiexec_dir')
            self.workspace = cf.get('PATH', 'working_dir')
        else:
            raise ValueError('[PATH] section MUST exist in the *.ini file.')
        if not (FileClass.is_dir_exists(self.base_dir)
                and FileClass.is_dir_exists(self.model_dir)
                and FileClass.is_dir_exists(self.txt_db_dir)
                and FileClass.is_dir_exists(self.preproc_script_dir)
                and FileClass.is_dir_exists(self.seims_bin)):
            raise IOError('Please check the directories defined in [PATH]. '
                          'BASE_DATA_DIR, MODEL_DIR, TXT_DB_DIR, PREPROC_SCRIPT_DIR, '
                          'and CPP_PROGRAM_DIR are required!')
        if not FileClass.is_dir_exists(self.mpi_bin):
            self.mpi_bin = None
        if not FileClass.is_dir_exists(self.workspace):
            try:  # first try to make dirs
                UtilClass.mkdir(self.workspace)
                # os.mkdir(self.workspace)
            except OSError as exc:
                self.workspace = self.model_dir + os.path.sep + 'preprocess_output'
                print('WARNING: Make WORKING_DIR failed: %s. '
                      'Use the default: %s' % (str(exc), self.workspace))
                if not os.path.exists(self.workspace):
                    UtilClass.mkdir(self.workspace)

        self.dirs = DirNameUtils(self.workspace)
        self.logs = LogNameUtils(self.dirs.log)
        self.vecs = VectorNameUtils(self.dirs.geoshp)
        self.taudems = TauDEMFilesUtils(self.dirs.taudem)
        self.spatials = SpatialNamesUtils(self.dirs.geodata2db)
        self.modelcfgs = ModelCfgUtils(self.model_dir)
        self.paramcfgs = ModelParamDataUtils(self.preproc_script_dir + os.path.sep + 'database')

        if not FileClass.is_dir_exists(self.clim_dir):
            print('The CLIMATE_DATA_DIR does not exist; trying the default folder name "climate".')
            self.clim_dir = self.base_dir + os.path.sep + 'climate'
            if not FileClass.is_dir_exists(self.clim_dir):
                raise IOError('A directory named "climate" MUST be located in [base_dir]!')

        if not FileClass.is_dir_exists(self.spatial_dir):
            print('The SPATIAL_DATA_DIR does not exist; trying the default folder name "spatial".')
            self.spatial_dir = self.base_dir + os.path.sep + 'spatial'
            if not FileClass.is_dir_exists(self.spatial_dir):
                raise IOError('A directory named "spatial" MUST be located in [base_dir]!')

        if not FileClass.is_dir_exists(self.observe_dir):
            self.observe_dir = None
            self.use_observed = False

        if not FileClass.is_dir_exists(self.scenario_dir):
            self.scenario_dir = None
            self.use_scernario = False

        # 2. MongoDB related
        if 'MONGODB' in cf.sections():
            self.hostname = cf.get('MONGODB', 'hostname')
            self.port = cf.getint('MONGODB', 'port')
            self.climate_db = cf.get('MONGODB', 'climatedbname')
            self.bmp_scenario_db = cf.get('MONGODB', 'bmpscenariodbname')
            self.spatial_db = cf.get('MONGODB', 'spatialdbname')
        else:
            raise ValueError('[MONGODB] section MUST exist in the *.ini file.')
        if not StringClass.is_valid_ip_addr(self.hostname):
            raise ValueError('Illegal HOSTNAME defined in [MONGODB]!')

        # 3. Model related switch. The SWITCH section should be removed! By lj.
        # by default, OpenMP version and daily (longterm) mode will be built
        # if 'SWITCH' in cf.sections():
        #     self.gen_cn = cf.getboolean('SWITCH', 'gencn')
        #     self.gen_runoff_coef = cf.getboolean('SWITCH', 'genrunoffcoef')
        #     self.gen_crop = cf.getboolean('SWITCH', 'gencrop')
        #
        # if self.storm_mode:
        #     self.gen_iuh = False
        #     self.climate_db = ModelNameUtils.standardize_climate_dbname(self.climate_db)

        # 4. Climate Input
        if 'CLIMATE' in cf.sections():
            self.hydro_climate_vars = self.clim_dir + os.path.sep + cf.get('CLIMATE',
                                                                           'hydroclimatevarfile')
            self.prec_sites = self.clim_dir + os.path.sep + cf.get('CLIMATE', 'precsitefile')
            self.prec_data = self.clim_dir + os.path.sep + cf.get('CLIMATE', 'precdatafile')
            self.Meteo_sites = self.clim_dir + os.path.sep + cf.get('CLIMATE', 'meteositefile')
            self.Meteo_data = self.clim_dir + os.path.sep + cf.get('CLIMATE', 'meteodatafile')
            self.thiessen_field = cf.get('CLIMATE', 'thiessenidfield')
        else:
            raise ValueError('Climate input file names MUST be provided in [CLIMATE]!')

        # 5. Spatial Input
        if 'SPATIAL' in cf.sections():
            self.prec_sites_thiessen = self.spatial_dir + os.path.sep + cf.get('SPATIAL',
                                                                               'precsitesthiessen')
            self.meteo_sites_thiessen = self.spatial_dir + os.path.sep + cf.get('SPATIAL',
                                                                                'meteositesthiessen')
            self.dem = self.spatial_dir + os.path.sep + cf.get('SPATIAL', 'dem')
            self.outlet_file = self.spatial_dir + os.path.sep + cf.get('SPATIAL', 'outlet_file')
            if not os.path.exists(self.outlet_file):
                self.outlet_file = None
            self.landuse = self.spatial_dir + os.path.sep + cf.get('SPATIAL', 'landusefile')
            self.landcover_init_param = self.txt_db_dir + os.path.sep + cf.get('SPATIAL',
                                                                               'landcoverinitfile')
            self.soil = self.spatial_dir + os.path.sep + cf.get('SPATIAL', 'soilseqnfile')
            self.soil_property = self.txt_db_dir + os.path.sep + cf.get('SPATIAL', 'soilseqntext')
            if cf.has_option('SPATIAL', 'additionalfile'):
                additional_dict_str = cf.get('SPATIAL', 'additionalfile')
                tmpdict = json.loads(additional_dict_str)
                tmpdict = {str(k): (str(v) if isinstance(v, str) else v) for k, v in
                           list(tmpdict.items())}
                for k, v in list(tmpdict.items()):
                    # Existence check has been moved to mask_origin_delineated_data()
                    #  in sp_delineation.py
                    self.additional_rs[k] = v
            # Field partition
            if cf.has_option('SPATIAL', 'field_partition_thresh'):
                ths = cf.get('SPATIAL', 'field_partition_thresh')
                thsv = StringClass.extract_numeric_values_from_string(ths)
                if thsv is not None:
                    self.fields_partition_thresh = [int(v) for v in thsv]
                    self.fields_partition = True
        else:
            raise ValueError('Spatial input file names MUST be provided in [SPATIAL]!')

        # 6. Option parameters
        if 'OPTIONAL_PARAMETERS' in cf.sections():
            self.d8acc_threshold = cf.getfloat('OPTIONAL_PARAMETERS', 'd8accthreshold')
            self.np = cf.getint('OPTIONAL_PARAMETERS', 'np')
            self.d8down_method = cf.get('OPTIONAL_PARAMETERS', 'd8downmethod')
            if StringClass.string_match(self.d8down_method, 'surface'):
                self.d8down_method = 's'
            elif StringClass.string_match(self.d8down_method, 'horizontal'):
                self.d8down_method = 'h'
            elif StringClass.string_match(self.d8down_method, 'pythagoras'):
                self.d8down_method = 'p'
            elif StringClass.string_match(self.d8down_method, 'vertical'):
                self.d8down_method = 'v'
            else:
                self.d8down_method = self.d8down_method.lower()
                if self.d8down_method not in ['s', 'h', 'p', 'v']:
                    self.d8down_method = 'h'
            self.dorm_hr = cf.getfloat('OPTIONAL_PARAMETERS', 'dorm_hr')
            self.temp_base = cf.getfloat('OPTIONAL_PARAMETERS', 't_base')
            self.imper_perc_in_urban = cf.getfloat('OPTIONAL_PARAMETERS',
                                                   'imperviouspercinurbancell')
            self.default_landuse = cf.getint('OPTIONAL_PARAMETERS', 'defaultlanduse')
            self.default_soil = cf.getint('OPTIONAL_PARAMETERS', 'defaultsoil')
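A sketch of how the optional [SPATIAL] entries parsed above might look in the *.ini file; the raster names and thresholds are illustrative only (the additionalfile value must be a JSON dict, and the thresholds are extracted as numeric values):

[SPATIAL]
additionalfile = {"CN2": "cn2_30m.tif", "RUNOFF_CO": "runoff_co.tif"}
field_partition_thresh = 10,15,20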
Code example #30
    def run(function_name,
            in_files,
            wp=None,
            in_params=None,
            out_files=None,
            mpi_params=None,
            log_params=None):
        """
        Run TauDEM function.

           1. The command will not execute if any input file does not exist.
           2. An error will be detected after running the TauDEM command if
           any output file does not exist.

        Args:
            function_name (str): Full path of TauDEM function.
            in_files (dict, required): Dict of pairs of parameter id (string) and file path
                (string or list) for input files, e.g.::

                    {'-z': '/full/path/to/dem.tif'}

            wp (str, optional): Workspace for outputs. If not specified, the directory of the
                first input file in ``in_files`` will be used.
            in_params (dict, optional): Dict of pairs of parameter id (string) and value
                (or None for a flag parameter without a value) for input parameters, e.g.::

                    {'-nc': None}
                    {'-thresh': threshold}
                    {'-m': 'ave s', '-nc': None}

            out_files (dict, optional): Dict of pairs of parameter id (string) and file
                path (string or list) for output files, e.g.::

                    {'-fel': 'filleddem.tif'}
                    {'-maxS': ['harden.tif', 'maxsimi.tif']}

            mpi_params (dict, optional): Dict of pairs of parameter id (string) and value or
                path for MPI setting, e.g.::

                    {'mpipath':'/soft/bin','hostfile':'/soft/bin/cluster.node','n':4}
                    {'mpipath':'/soft/bin', 'n':4}
                    {'n':4}

            log_params (dict, optional): Dict of pairs of parameter id (string) and value or
                path for runtime and log output parameters. e.g.::

                    {'logfile': '/home/user/log.txt',
                     'runtimefile': '/home/user/runtime.txt'}

        Returns:
            True if TauDEM run successfully, otherwise False.
        """
        # Check input files
        if in_files is None:
            TauDEM.error('Input files parameter is required!')
        if not isinstance(in_files, dict):
            TauDEM.error('The input files parameter must be a dict!')
        for (pid, infile) in list(in_files.items()):
            if infile is None:
                continue
            if isinstance(infile, list) or isinstance(infile, tuple):
                for idx, inf in enumerate(infile):
                    if inf is None:
                        continue
                    inf, wp = TauDEM.check_infile_and_wp(inf, wp)
                    in_files[pid][idx] = inf
                continue
            if os.path.exists(infile):
                infile, wp = TauDEM.check_infile_and_wp(infile, wp)
                in_files[pid] = os.path.abspath(infile)
            else:
                # To allow more flexible input file specifications,
                # e.g., -inputtags 1 <path/to/tag1.tif> 2 <path/to/tag2.tif> ...
                # In such unpredictable circumstances, the existence of the input
                # files cannot be checked here; the caller must check them elsewhere.
                if len(StringClass.split_string(infile, ' ')) > 1:
                    continue
                else:  # the infile should still be an existing file, so check in the workspace
                    if wp is None:
                        TauDEM.error('Workspace should not be None!')
                    infile = wp + os.sep + infile
                    if not os.path.exists(infile):
                        TauDEM.error(
                            'Input file parameter %s: %s does not exist!' %
                            (pid, infile))
                    in_files[pid] = os.path.abspath(infile)
        # Make the workspace directory if it does not exist
        UtilClass.mkdir(wp)
        # Check the log parameter
        log_file = None
        runtime_file = None
        if log_params is not None:
            if not isinstance(log_params, dict):
                TauDEM.error('The log parameter must be a dict!')
            if 'logfile' in log_params and log_params['logfile'] is not None:
                log_file = log_params['logfile']
                # If log_file is just a file name, then save it in the default workspace.
                if os.sep not in log_file:
                    log_file = wp + os.sep + log_file
                    log_file = os.path.abspath(log_file)
            if 'runtimefile' in log_params and log_params[
                    'runtimefile'] is not None:
                runtime_file = log_params['runtimefile']
                # If runtime_file is just a file name, then save it in the default workspace.
                if os.sep not in runtime_file:
                    runtime_file = wp + os.sep + runtime_file
                    runtime_file = os.path.abspath(runtime_file)

        # Remove existing out_files to avoid file I/O related errors
        new_out_files = list()
        if out_files is not None:
            if not isinstance(out_files, dict):
                TauDEM.error('The output files parameter must be a dict!')
            for (pid, out_file) in list(out_files.items()):
                if out_file is None:
                    continue
                if isinstance(out_file, list) or isinstance(out_file, tuple):
                    for idx, outf in enumerate(out_file):
                        if outf is None:
                            continue
                        outf = FileClass.get_file_fullpath(outf, wp)
                        FileClass.remove_files(outf)
                        out_files[pid][idx] = outf
                        new_out_files.append(outf)
                else:
                    out_file = FileClass.get_file_fullpath(out_file, wp)
                    FileClass.remove_files(out_file)
                    out_files[pid] = out_file
                    new_out_files.append(out_file)

        # concatenate command line
        commands = list()
        # MPI header
        if mpi_params is not None:
            if not isinstance(mpi_params, dict):
                TauDEM.error('The MPI settings parameter must be a dict!')
            if 'mpipath' in mpi_params and mpi_params['mpipath'] is not None:
                commands.append(mpi_params['mpipath'] + os.sep + 'mpiexec')
            else:
                commands.append('mpiexec')
            if 'hostfile' in mpi_params and mpi_params['hostfile'] is not None \
                    and not StringClass.string_match(mpi_params['hostfile'], 'none') \
                    and os.path.isfile(mpi_params['hostfile']):
                commands.append('-f')
                commands.append(mpi_params['hostfile'])
            if 'n' in mpi_params and mpi_params['n'] > 1:
                commands.append('-n')
                commands.append(str(mpi_params['n']))
            else:  # If the number of processes is less than or equal to 1, do not invoke mpiexec.
                commands = []
        # append TauDEM function name, which can be full path or just one name
        commands.append(function_name)
        # append input files
        for (pid, infile) in list(in_files.items()):
            if infile is None:
                continue
            if pid[0] != '-':
                pid = '-' + pid
            commands.append(pid)
            if isinstance(infile, list) or isinstance(infile, tuple):
                commands.append(' '.join(tmpf for tmpf in infile))
            else:
                commands.append(infile)
        # append input parameters
        if in_params is not None:
            if not isinstance(in_params, dict):
                TauDEM.error('The input parameters must be a dict!')
            for (pid, v) in list(in_params.items()):
                if pid[0] != '-':
                    pid = '-' + pid
                commands.append(pid)
                # Allow a parameter that is a flag without a value.
                if v != '' and v is not None:
                    if MathClass.isnumerical(v):
                        commands.append(str(v))
                    else:
                        commands.append(v)
        # append output parameters
        if out_files is not None:
            for (pid, outfile) in list(out_files.items()):
                if outfile is None:
                    continue
                if pid[0] != '-':
                    pid = '-' + pid
                commands.append(pid)
                if isinstance(outfile, list) or isinstance(outfile, tuple):
                    commands.append(' '.join(tmpf for tmpf in outfile))
                else:
                    commands.append(outfile)
        # run command
        runmsg = UtilClass.run_command(commands)
        TauDEM.log(runmsg, log_file)
        TauDEM.output_runtime_to_log(function_name, runmsg, runtime_file)
        # Check out_files; report an error if any was not generated.
        for of in new_out_files:
            if not os.path.exists(of):
                TauDEM.error('%s failed, and %s was not generated!' %
                             (function_name, of))
                return False
        return True
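A hedged usage sketch of TauDEM.run for pitremove, following the docstring above; every path here is an assumption:

TauDEM.run('/taudem/bin/pitremove',
           {'-z': '/data/dem.tif'},
           wp='/data/taudem_out',
           out_files={'-fel': 'dem_fel.tif'},
           mpi_params={'mpipath': '/usr/bin', 'n': 4},
           log_params={'logfile': 'taudem.log', 'runtimefile': 'runtime.txt'})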
Code example #31
    def watershed_delineation(np,
                              dem,
                              outlet_file=None,
                              thresh=0,
                              singlebasin=False,
                              workingdir=None,
                              mpi_bin=None,
                              bin_dir=None,
                              logfile=None,
                              runtime_file=None,
                              hostfile=None):
        """Watershed Delineation."""
        # 1. Check directories
        if not os.path.exists(dem):
            TauDEM.error('DEM: %s does not exist!' % dem)
        dem = os.path.abspath(dem)
        if workingdir is None:
            workingdir = os.path.dirname(dem)
        namecfg = TauDEMFilesUtils(workingdir)
        workingdir = namecfg.workspace
        UtilClass.mkdir(workingdir)
        # 2. Check log file
        if logfile is not None and FileClass.is_file_exists(logfile):
            os.remove(logfile)
        # 3. Get predefined intermediate file names
        filled_dem = namecfg.filldem
        flow_dir = namecfg.d8flow
        slope = namecfg.slp
        flow_dir_dinf = namecfg.dinf
        slope_dinf = namecfg.dinf_slp
        dir_code_dinf = namecfg.dinf_d8dir
        weight_dinf = namecfg.dinf_weight
        acc = namecfg.d8acc
        stream_raster = namecfg.stream_raster
        default_outlet = namecfg.outlet_pre
        modified_outlet = namecfg.outlet_m
        stream_skeleton = namecfg.stream_pd
        acc_with_weight = namecfg.d8acc_weight
        stream_order = namecfg.stream_order
        ch_network = namecfg.channel_net
        ch_coord = namecfg.channel_coord
        stream_net = namecfg.streamnet_shp
        subbasin = namecfg.subbsn
        dist2_stream_d8 = namecfg.dist2stream_d8

        # 4. perform calculation
        UtilClass.writelog(logfile,
                           "[Output] %d..., %s" % (10, "pitremove DEM..."),
                           'a')
        TauDEM.pitremove(np,
                         dem,
                         filled_dem,
                         workingdir,
                         mpi_bin,
                         bin_dir,
                         log_file=logfile,
                         runtime_file=runtime_file,
                         hostfile=hostfile)
        UtilClass.writelog(
            logfile, "[Output] %d..., %s" %
            (20, "Calculating D8 and Dinf flow direction..."), 'a')
        TauDEM.d8flowdir(np,
                         filled_dem,
                         flow_dir,
                         slope,
                         workingdir,
                         mpi_bin,
                         bin_dir,
                         log_file=logfile,
                         runtime_file=runtime_file,
                         hostfile=hostfile)
        TauDEM.dinfflowdir(np,
                           filled_dem,
                           flow_dir_dinf,
                           slope_dinf,
                           workingdir,
                           mpi_bin,
                           bin_dir,
                           log_file=logfile,
                           runtime_file=runtime_file,
                           hostfile=hostfile)
        DinfUtil.output_compressed_dinf(flow_dir_dinf, dir_code_dinf,
                                        weight_dinf)
        UtilClass.writelog(
            logfile, "[Output] %d..., %s" % (30, "D8 flow accumulation..."),
            'a')
        TauDEM.aread8(np,
                      flow_dir,
                      acc,
                      None,
                      None,
                      False,
                      workingdir,
                      mpi_bin,
                      bin_dir,
                      log_file=logfile,
                      runtime_file=runtime_file,
                      hostfile=hostfile)
        UtilClass.writelog(
            logfile, "[Output] %d..., %s" %
            (40, "Generating stream raster initially..."), 'a')
        min_accum, max_accum, mean_accum, std_accum = RasterUtilClass.raster_statistics(
            acc)
        TauDEM.threshold(np,
                         acc,
                         stream_raster,
                         mean_accum,
                         workingdir,
                         mpi_bin,
                         bin_dir,
                         log_file=logfile,
                         runtime_file=runtime_file,
                         hostfile=hostfile)
        UtilClass.writelog(
            logfile, "[Output] %d..., %s" % (50, "Moving outlet to stream..."),
            'a')
        if outlet_file is None:
            outlet_file = default_outlet
            TauDEM.connectdown(np,
                               flow_dir,
                               acc,
                               outlet_file,
                               wtsd=None,
                               workingdir=workingdir,
                               mpiexedir=mpi_bin,
                               exedir=bin_dir,
                               log_file=logfile,
                               runtime_file=runtime_file,
                               hostfile=hostfile)
        TauDEM.moveoutletstostrm(np,
                                 flow_dir,
                                 stream_raster,
                                 outlet_file,
                                 modified_outlet,
                                 workingdir,
                                 mpi_bin,
                                 bin_dir,
                                 log_file=logfile,
                                 runtime_file=runtime_file,
                                 hostfile=hostfile)
        UtilClass.writelog(
            logfile,
            "[Output] %d..., %s" % (60, "Generating stream skeleton..."), 'a')
        TauDEM.peukerdouglas(np,
                             filled_dem,
                             stream_skeleton,
                             workingdir,
                             mpi_bin,
                             bin_dir,
                             log_file=logfile,
                             runtime_file=runtime_file,
                             hostfile=hostfile)
        UtilClass.writelog(
            logfile,
            "[Output] %d..., %s" % (70, "Flow accumulation with outlet..."),
            'a')
        tmp_outlet = None
        if singlebasin:
            tmp_outlet = modified_outlet
        TauDEM.aread8(np,
                      flow_dir,
                      acc_with_weight,
                      tmp_outlet,
                      stream_skeleton,
                      False,
                      workingdir,
                      mpi_bin,
                      bin_dir,
                      log_file=logfile,
                      runtime_file=runtime_file,
                      hostfile=hostfile)

        if thresh <= 0:  # find the optimal threshold using dropanalysis function
            UtilClass.writelog(
                logfile, "[Output] %d..., %s" %
                (75, "Drop analysis to select optimal threshold..."), 'a')
            min_accum, max_accum, mean_accum, std_accum = \
                RasterUtilClass.raster_statistics(acc_with_weight)
            if mean_accum - std_accum < 0:
                minthresh = mean_accum
            else:
                minthresh = mean_accum - std_accum
            maxthresh = mean_accum + std_accum
            numthresh = 20
            logspace = 'true'
            drp_file = namecfg.drptxt
            TauDEM.dropanalysis(np,
                                filled_dem,
                                flow_dir,
                                acc_with_weight,
                                acc_with_weight,
                                modified_outlet,
                                minthresh,
                                maxthresh,
                                numthresh,
                                logspace,
                                drp_file,
                                workingdir,
                                mpi_bin,
                                bin_dir,
                                log_file=logfile,
                                runtime_file=runtime_file,
                                hostfile=hostfile)
            if not FileClass.is_file_exists(drp_file):
                raise RuntimeError(
                    "Dropanalysis failed and drp.txt was not created!")
            with open(drp_file, 'r') as drpf:
                temp_contents = drpf.read()
            # The optimal threshold is the last whitespace-separated token of drp.txt
            (beg, thresh) = temp_contents.rsplit(' ', 1)
            print(thresh)
        UtilClass.writelog(
            logfile,
            "[Output] %d..., %s" % (80, "Generating stream raster..."), 'a')
        TauDEM.threshold(np,
                         acc_with_weight,
                         stream_raster,
                         float(thresh),
                         workingdir,
                         mpi_bin,
                         bin_dir,
                         log_file=logfile,
                         runtime_file=runtime_file,
                         hostfile=hostfile)
        UtilClass.writelog(
            logfile, "[Output] %d..., %s" % (90, "Generating stream net..."),
            'a')
        TauDEM.streamnet(np,
                         filled_dem,
                         flow_dir,
                         acc_with_weight,
                         stream_raster,
                         modified_outlet,
                         stream_order,
                         ch_network,
                         ch_coord,
                         stream_net,
                         subbasin,
                         workingdir,
                         mpi_bin,
                         bin_dir,
                         log_file=logfile,
                         runtime_file=runtime_file,
                         hostfile=hostfile)
        UtilClass.writelog(
            logfile, "[Output] %d..., %s" %
            (95, "Calculating distance to stream (D8)..."), 'a')
        TauDEM.d8hdisttostrm(np,
                             flow_dir,
                             stream_raster,
                             dist2_stream_d8,
                             1,
                             workingdir,
                             mpi_bin,
                             bin_dir,
                             log_file=logfile,
                             runtime_file=runtime_file,
                             hostfile=hostfile)
        UtilClass.writelog(
            logfile, "[Output] %d.., %s" %
            (100, "Original subbasin delineation is finished!"), 'a')
Code example #32
    def metis_partition(g, weight, wp, bin_dir):
        """Partition subbasins into multiple groups by METIS

        Args:
            g: `NetworkX.DiGraph` object
            weight: weight of each node, e.g., area of subbasin, {subbasinID: weight}
            wp: output directory
            bin_dir: directory of METIS package
        Returns:
            group_dict: {subbasinID: {'group': group_number_list,
                                      'kmetis': group_ids_list_by_kmetis,
                                      'pmetis': group_ids_list_by_pmetis}
                        }
        """
        group_dict = dict()
        for subbsn_id in g.nodes():
            group_dict[subbsn_id] = {'group': list(), 'kmetis': list(), 'pmetis': list()}

        metis_input = ImportReaches2Mongo.prepare_node_with_weight_for_metis(g, weight, wp)
        # Create the candidate numbers of partitions
        nlist = list(range(1, 129))
        nlist.extend([192, 256, 384, 512, 768, 1536])
        # Values in nlist must not exceed the number of subbasins; larger values are meaningless.
        ns = g.nodes()
        nlist = [x for x in nlist if x <= max(ns)]
        # Make directories for KMETIS and PMETIS
        UtilClass.mkdir(wp + os.path.sep + 'kmetis')
        UtilClass.mkdir(wp + os.path.sep + 'pmetis')
        for n in nlist:
            print('divide number: %d' % n)
            if n <= 1:
                for subbsn_id in g.nodes():
                    group_dict[subbsn_id]['group'].append(1)
                    group_dict[subbsn_id]['kmetis'].append(0)
                    group_dict[subbsn_id]['pmetis'].append(0)
                continue
            # kmetis, -ptype=kway, direct k-way partitioning (default)
            str_command = '"%s/gpmetis" %s %d' % (bin_dir, metis_input, n)
            result = UtilClass.run_command(str_command)
            with open('%s/kmetis/kmetisResult%d.txt' % (wp, n), 'w') as f_metis_output:
                for line in result:
                    f_metis_output.write(line)
            metis_output = '%s.part.%d' % (metis_input, n)
            with open(metis_output, 'r') as f:
                lines = f.readlines()
            group_kmetis = [int(item) for item in lines]
            adjust_group_result(weight, group_kmetis, n)
            shutil.move(metis_output, '%s/kmetis/metis.part.%d' % (wp, n))

            # pmetis, -ptype=rb, recursive bisectioning
            str_command = '"%s/gpmetis" -ptype=rb %s %d' % (bin_dir, metis_input, n)
            result = UtilClass.run_command(str_command)
            with open('%s/pmetis/pmetisResult%d.txt' % (wp, n), 'w') as f_metis_output:
                for line in result:
                    f_metis_output.write(line)
            with open(metis_output, 'r') as f:
                lines = f.readlines()
            group_pmetis = [int(item) for item in lines]
            adjust_group_result(weight, group_pmetis, n)
            shutil.move(metis_output, '%s/pmetis/metis.part.%d' % (wp, n))

            for i, (gk, gp) in enumerate(zip(group_kmetis, group_pmetis)):
                group_dict[i + 1]['group'].append(n)
                group_dict[i + 1]['kmetis'].append(gk)
                group_dict[i + 1]['pmetis'].append(gp)
        return group_dict
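For clarity, the shape of the returned group_dict for a three-subbasin network and divide numbers [1, 2]; the group ids are illustrative:

# {1: {'group': [1, 2], 'kmetis': [0, 0], 'pmetis': [0, 1]},
#  2: {'group': [1, 2], 'kmetis': [0, 1], 'pmetis': [0, 0]},
#  3: {'group': [1, 2], 'kmetis': [0, 1], 'pmetis': [0, 1]}}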