Example #1
0
def read_pareto_solutions_from_txt(txt_file, sce_name='scenario', field_name='gene_values'):
    # type: (AnyStr, AnyStr, AnyStr) -> (Dict[int, List[List[float]]])
    """Read Pareto points from `runtime.log` file.

    Args:
        txt_file: Full file path of `runtime.log` output by NSGA2 algorithm.
        sce_name: Field name followed by `generation`, e.g., 'calibrationID', 'scenario', etc.
            Currently unused; kept for backward-compatible interface.
        field_name: Field name in header for gene values, 'gene_values' by default.

    Returns:
        pareto_solutions: `OrderedDict`, key is generation ID, value is arrays of Pareto solutions
    """
    with open(txt_file, 'r', encoding='utf-8') as f:
        lines = f.readlines()
    pareto_solutions = OrderedDict()
    found = False  # True once the first generation header is seen
    cur_gen = -1
    field_idx = -1

    for line in lines:
        # Strip any known line-ending marker
        str_line = line
        for LF in LFs:
            if LF in line:
                str_line = line.split(LF)[0]
                break
        if str_line == '':
            continue
        values = StringClass.extract_numeric_values_from_string(str_line)
        # Check generation header, e.g., '###### Generation: 23 ######'
        if str_line[0] == '#' and 'Generation' in str_line:
            # BUGFIX: `values` is None when the line contains no numbers,
            # which would make `len(values)` raise TypeError.
            if values is None or len(values) != 1:
                continue
            gen = int(values[0])
            found = True
            cur_gen = gen
            pareto_solutions[cur_gen] = list()
            continue
        if not found:  # If the first "###### Generation: 1 ######" has not been found.
            continue
        line_list = StringClass.split_string(str_line.upper(), ['\t'])
        if values is None:  # means header line
            if field_idx >= 0:  # gene-values column already located
                continue
            for idx, v in enumerate(line_list):
                # items in `line_list` are already uppercased above
                if field_name.upper() in v:
                    field_idx = idx
                    break
            continue
        if field_idx < 0:
            continue
        # Now append the real Pareto solutions data
        tmpvalues = StringClass.extract_numeric_values_from_string(line_list[field_idx])
        pareto_solutions[cur_gen].append(tmpvalues[:])

    return pareto_solutions
Example #2
0
 def read_optionfuzinf_section(self, _optfuzinf):
     """Optional parameter-settings for Fuzzy slope position inference.

     Parses the 'inferparams' option of section ``_optfuzinf``: one item per
     slope position type (separated by ';'), each item holding groups of four
     numbers -- Attribute No., FMF No., w1, w2.  Fills
     ``self.inferparam[slppos][attrname]`` with ``[fmf] + fmf_params``.

     Raises:
         RuntimeError: If the number of items differs from the number of
             slope position types, or an item is not a multiple of four values.
     """
     if _optfuzinf not in self.cf.sections():
         return
     if self.cf.has_option(_optfuzinf, 'inferparams'):
         fuzinf_strs = self.cf.get(_optfuzinf, 'inferparams')
         if StringClass.string_match(fuzinf_strs, 'none'):
             return  # 'none' explicitly disables inference parameters
         self.inferparam = dict()
         fuzinf_types = StringClass.split_string(fuzinf_strs, ';')
         if len(fuzinf_types) != len(self.slppostype):
             raise RuntimeError(
                 "InferParams (%s) MUST be consistent with slope position types"
                 " and separated by ';'!" % fuzinf_strs)
         for i, slppos in enumerate(self.slppostype):
             self.inferparam[slppos] = dict()
             infparams = StringClass.extract_numeric_values_from_string(
                 fuzinf_types[i])
             if len(infparams) % 4 != 0:
                 raise RuntimeError(
                     "Each item of InferParams MUST contains four elements,"
                     "i.e., Attribute No., FMF No., w1, w2! Please check item: "
                     "%s for %s." % (fuzinf_types[i], slppos))
             for j in range(int(len(infparams) / 4)):
                 # Attribute No. is 1-based in the configuration file
                 attridx = int(infparams[j * 4]) - 1
                 attrname = self.selectedtopolist[attridx]
                 fmf = self._FMFTYPE[int(infparams[j * 4 + 1])]
                 # Copy the FMF parameter template, then override w1/w2 slots
                 curinfparam = self._FMFPARAM[fmf][:]
                 curinfparam[0] = infparams[j * 4 + 2]  # w1
                 curinfparam[3] = infparams[j * 4 + 3]  # w2
                 self.inferparam[slppos][attrname] = [fmf] + curinfparam
Example #3
0
    def read_bmp_parameters(self):
        """Read BMP configuration from MongoDB.

        Each BMP is stored in Collection as one item identified by 'SUBSCENARIO' field,
        so the `self.bmps_params` is dict with BMP_ID ('SUBSCENARIO') as key.
        """
        conn = MongoDBObj.client
        scenariodb = conn[self.scenario_db]

        bmpcoll = scenariodb[self.cfg.bmps_coll]
        findbmps = bmpcoll.find({}, no_cursor_timeout=True)
        for fb in findbmps:
            fb = UtilClass.decode_strs_in_dict(fb)
            if 'SUBSCENARIO' not in fb:
                continue
            curid = fb['SUBSCENARIO']
            if curid not in self.cfg.bmps_subids:
                continue  # only load the BMPs selected in the configuration
            if curid not in self.bmps_params:
                self.bmps_params[curid] = dict()
            for k, v in fb.items():
                if k == 'SUBSCENARIO':
                    continue  # already used as the dict key
                elif k == 'LANDUSE':
                    if isinstance(v, int):
                        v = [v]
                    elif v == 'ALL' or v == '':
                        v = None  # None means applicable to all landuse types
                    else:
                        v = StringClass.extract_numeric_values_from_string(v)
                        v = [int(abs(nv)) for nv in v]
                    # BUGFIX: `None[:]` raises TypeError, so only copy real lists.
                    self.bmps_params[curid][k] = v[:] if v is not None else None
                elif k == 'SLPPOS':
                    if isinstance(v, int):
                        v = [v]
                    elif v == 'ALL' or v == '':
                        # empty/'ALL' means every slope position tag applies
                        v = list(self.cfg.slppos_tags.keys())
                    else:
                        v = StringClass.extract_numeric_values_from_string(v)
                        v = [int(abs(nv)) for nv in v]
                    self.bmps_params[curid][k] = v[:]
                else:
                    self.bmps_params[curid][k] = v
Example #4
0
    def Precipitation(self, subbsn_id, start_time, end_time):
        # type: (int, datetime, datetime) -> List[List[Union[datetime, float]]]
        """Read average precipitation time series of one subbasin.

        The precipitation is read according to the subbasin ID.
            Especially when plot a specific subbasin (such as ID 3).
            For the whole basin, the subbasin ID is 0.

        Args:
            subbsn_id: Subbasin ID (0 for the whole basin).
            start_time: Query start datetime (inclusive, `$gte`).
            end_time: Query end datetime (inclusive, `$lte`).

        Returns:
            Precipitation data list with the first element as datetime.
            [[Datetime1, value1], [Datetime2, value2], ..., [Datetimen, valuen]]

        Raises:
            RuntimeError: If no precipitation site is found for the subbasin.
        """
        pcp_date_value = list()
        # Look up the precipitation site IDs associated with this subbasin.
        sitelist_tab = self.maindb[DBTableNames.main_sitelist]
        findsites = sitelist_tab.find_one({
            FieldNames.subbasin_id: subbsn_id,
            FieldNames.mode: self.Mode
        })
        if findsites is not None:
            site_liststr = findsites[FieldNames.site_p]
        else:
            raise RuntimeError(
                'Cannot find precipitation site for subbasin %d.' % subbsn_id)
        site_list = StringClass.extract_numeric_values_from_string(
            site_liststr)
        site_list = [int(v) for v in site_list]
        if len(site_list) == 0:
            raise RuntimeError(
                'Cannot find precipitation site for subbasin %d.' % subbsn_id)

        pcp_dict = OrderedDict()

        # Sum the values of all sites per timestamp, iterating in ascending
        # time order so `pcp_dict` keeps chronological key order.
        for pdata in self.climatedb[DBTableNames.data_values].find({
                DataValueFields.utc: {
                    "$gte": start_time,
                    '$lte': end_time
                },
                DataValueFields.type:
                DataType.p,
                DataValueFields.id: {
                    "$in": site_list
                }
        }).sort([(DataValueFields.utc, 1)]):
            curt = pdata[DataValueFields.utc]
            curv = pdata[DataValueFields.value]
            if curt not in pcp_dict:
                pcp_dict[curt] = 0.
            pcp_dict[curt] += curv
        # average over sites when more than one site contributed
        if len(site_list) > 1:
            for t in pcp_dict:
                pcp_dict[t] /= len(site_list)
        for t, v in pcp_dict.items():
            # print(str(t), v)
            pcp_date_value.append([t, v])
        print('Read precipitation from %s to %s done.' %
              (start_time.strftime('%c'), end_time.strftime('%c')))
        return pcp_date_value
Example #5
0
def read_inf_param_from_file(conf):
    """Read fuzzy inference parameters from file."""
    collected = []
    with open(conf, 'r', encoding='utf-8') as fp:
        for raw_line in fp.readlines():
            # Drop the trailing newline, then split columns by TAB.
            stripped = raw_line.split('\n')[0]
            fields = stripped.split('\t')
            numbers = StringClass.extract_numeric_values_from_string(stripped)
            if StringClass.string_match(fields[0], 'Parameters') and len(numbers) >= 6:
                # Keep the 2nd and 4th columns plus the last six numeric values.
                collected.append([fields[1], fields[3]] + numbers[-6:])
    return collected
Example #6
0
    def read_bmp_parameters(self):
        """Read BMP configuration from MongoDB.

        Each BMP is one document identified by the 'SUBSCENARIO' field; the
        results fill `self.bmps_params` keyed by BMP_ID ('SUBSCENARIO').
        """
        client = ConnectMongoDB(self.hostname, self.port)
        conn = client.get_conn()
        scenariodb = conn[self.bmp_scenario_db]

        bmpcoll = scenariodb[self.bmps_coll]
        findbmps = bmpcoll.find({}, no_cursor_timeout=True)
        for fb in findbmps:
            fb = UtilClass.decode_strs_in_dict(fb)
            if 'SUBSCENARIO' not in fb:
                continue
            curid = fb['SUBSCENARIO']
            if curid not in self.bmps_subids:
                continue  # only load the BMPs selected for this scenario
            if curid not in self.bmps_params:
                self.bmps_params[curid] = dict()
            for k, v in fb.items():
                if k == 'SUBSCENARIO':
                    continue  # already used as the dict key
                elif k == 'LANDUSE':
                    if isinstance(v, int):
                        v = [v]
                    elif v == 'ALL' or v == '':
                        v = None  # None means applicable to all landuse types
                    else:
                        v = StringClass.extract_numeric_values_from_string(v)
                        v = [int(abs(nv)) for nv in v]
                    # BUGFIX: `None[:]` raises TypeError, so only copy real lists.
                    self.bmps_params[curid][k] = v[:] if v is not None else None
                elif k == 'SLPPOS':
                    if isinstance(v, int):
                        v = [v]
                    elif v == 'ALL' or v == '':
                        # empty/'ALL' means every slope position tag applies
                        v = list(self.slppos_tags.keys())
                    else:
                        v = StringClass.extract_numeric_values_from_string(v)
                        v = [int(abs(nv)) for nv in v]
                    self.bmps_params[curid][k] = v[:]
                else:
                    self.bmps_params[curid][k] = v

        client.close()
Example #7
0
def read_pareto_popsize_from_txt(txt_file, sce_name='scenario'):
    # type: (AnyStr, AnyStr) -> (List[int], List[int])
    """Read the population size of each generation.

    Args:
        txt_file: Full file path of `runtime.log` output by NSGA2 algorithm.
        sce_name: Header field name that identifies each individual.

    Returns:
        genids: Sorted generation IDs.
        acc_num: Accumulated count of unique individuals up to each generation.
    """
    with open(txt_file, 'r', encoding='utf-8') as f:
        lines = f.readlines()
    pareto_popnum = OrderedDict()
    found = False  # True once the first generation header is seen
    cur_gen = -1
    iden_idx = -1
    for line in lines:
        # Strip any known line-ending marker
        str_line = line
        for LF in LFs:
            if LF in line:
                str_line = line.split(LF)[0]
                break
        if str_line == '':
            continue
        values = StringClass.extract_numeric_values_from_string(str_line)
        # Check generation header, e.g., '###### Generation: 23 ######'
        if str_line[0] == '#' and 'Generation' in str_line:
            # BUGFIX: `values` is None when the line contains no numbers,
            # which would make `len(values)` raise TypeError.
            if values is None or len(values) != 1:
                continue
            gen = int(values[0])
            found = True
            cur_gen = gen
            pareto_popnum[cur_gen] = list()
            continue
        if not found:
            continue
        if values is None:  # means header line
            line_list = StringClass.split_string(str_line, ['\t'])
            for idx, v in enumerate(line_list):
                if StringClass.string_match(v, sce_name):
                    iden_idx = idx
                    break
            continue
        if iden_idx < 0:
            continue
        # Now append the real Pareto front point data
        pareto_popnum[cur_gen].append(int(values[iden_idx]))

    all_sceids = list()
    acc_num = list()
    genids = sorted(pareto_popnum.keys())
    for genid in genids:
        for _id in pareto_popnum[genid]:
            if _id not in all_sceids:
                all_sceids.append(_id)
        acc_num.append(len(all_sceids))
    return genids, acc_num
Example #8
0
def get_input_cfgs():
    """Get model configuration arguments from the command line.

    Returns:
        AutoFuzSlpPosConfig object built from the *.ini file and/or
        command-line overrides.

    Raises:
        RuntimeError: If '-proc' is not a single integer, or neither an
            *.ini file nor the '-dem'/'-bin' pair is provided.
    """
    parser = argparse.ArgumentParser(
        description="Read AutoFuzSlpPos configurations.")
    parser.add_argument('-ini', help="Full path of configuration file.")
    parser.add_argument('-bin',
                        help="Path of executable programs, which will override"
                        "exeDir in *.ini file.")
    parser.add_argument('-proc',
                        help="Number of processor for parallel computing, "
                        "which will override inputProc in *.ini file.")
    parser.add_argument('-dem', help="DEM of study area.")
    parser.add_argument('-root',
                        help="Workspace to store results, which will override "
                        "rootDir in *.ini file.")
    args = parser.parse_args()

    ini_file = args.ini
    bin_dir = args.bin
    input_proc = args.proc
    rawdem = args.dem
    root_dir = args.root
    if input_proc is not None:
        xx = StringClass.extract_numeric_values_from_string(input_proc)
        if xx is None or len(xx) != 1:
            raise RuntimeError("-proc MUST be one integer number!")
        input_proc = int(xx[0])
    else:
        input_proc = -1  # -1 means "not specified"
    if not FileClass.is_file_exists(ini_file):
        if FileClass.is_file_exists(rawdem) and os.path.isdir(bin_dir):
            # In this scenario, the script can be executed by default setting, i.e., the *.ini
            # file is not required.
            cf = None
            if input_proc < 0:
                # BUGFIX: use floor division; `cpu_count() / 2` is a float
                # on Python 3, but a process count must be an integer.
                input_proc = cpu_count() // 2
        else:
            raise RuntimeError(
                "*.ini file MUST be provided when '-dem', '-bin', "
                "and '-root' are not provided!")
    else:
        cf = ConfigParser()
        cf.read(ini_file)

    return AutoFuzSlpPosConfig(cf, bin_dir, input_proc, rawdem, root_dir)
Example #9
0
 def ParseTimespan(self, items):
      # type: (List[AnyStr]) -> Dict[AnyStr, Dict[AnyStr, Union[float, Dict[AnyStr, float]]]]
      """Parse all TIMESPAN items into `self.timespan` and return it.

      The format of self.timespan is different for OpenMP version and MPI&OpenMP version.
      For OpenMP version:
         {'IO': {'Input': 0.2,
                 'Output': 0.04
                }
          'COMP': {'TSD_RD_P': 0.0001,  # All modules
                   'ALL': 12.3
                  }
          'SIMU': {'ALL': 14.1}
         }
      For MPI&OpenMP version:
         {'MAX': {'IO': {'Input': 0.1,
                         'Output': 0.02,
                         'ALL': 0.12
                        }
                  'COMP': {'Slope': 5,
                           'Channel': 0.5,
                           'Barrier': 0.1,
                           'ALL': 5.6
                          }
                  'SIMU': {'ALL': 10.1}
                 }
          'MIN': {...}
          'AVG': {...}
         }
      """
      for item in items:
          if 'TIMESPAN' not in item:
              continue
          item = item.split('\n')[0]
          values = StringClass.extract_numeric_values_from_string(item)
          if values is None or len(values) != 1:
              continue
          time = values[0]
          titles = item.replace(
              '[', '').split(']')[:-1]  # e.g., 'TIMESPAN', 'COMP', 'ALL'
          if len(titles) < 3:  # e.g., 'TIMESPAN', 'MAX', 'COMP', 'ALL'
              continue
          titles = [title.strip() for title in titles]
          self.timespan.setdefault(titles[1], dict())
          if len(titles) > 3:
              self.timespan[titles[1]].setdefault(titles[2], dict())
              self.timespan[titles[1]][titles[2]].setdefault(titles[3], time)
          else:
              self.timespan[titles[1]].setdefault(titles[2], time)
      # BUGFIX: return AFTER the loop. The original returned inside the loop
      # body, which discarded every item after the first TIMESPAN entry.
      return self.timespan
Example #10
0
    def export_landuse_lookup_files_from_mongodb(cfg, maindb):
        """Export landuse lookup tables to txt file from MongoDB.

        Args:
            cfg: Configuration object providing the `dirs.lookup` output directory.
            maindb: Main database holding the 'LANDUSELOOKUP' collection.

        Raises:
            RuntimeError: If the 'LANDUSELOOKUP' collection is empty.
        """
        lookup_dir = cfg.dirs.lookup
        # BUGFIX: copy the shared field list. Appending to
        # `ModelParamDataUtils.landuse_fields` directly would grow the
        # class-level list by one 'USLE_P' entry on every call.
        property_namelist = ModelParamDataUtils.landuse_fields[:]
        property_namelist.append('USLE_P')
        property_map = dict()
        query_result = maindb['LANDUSELOOKUP'].find()
        count = 0
        for row in query_result:
            value_map = dict()
            for p_name in property_namelist:
                if StringClass.string_match(p_name, 'USLE_P'):
                    # Currently, USLE_P is set as 1 for all landuse.
                    value_map[p_name] = 1
                else:
                    v = row.get(p_name)
                    if is_string(v):
                        v = StringClass.extract_numeric_values_from_string(
                            v)[0]
                    value_map[p_name] = v
            count += 1
            property_map[count] = value_map
        # BUGFIX: `find()` returns a cursor and never None, so emptiness must
        # be checked after iterating, not by comparing the cursor with None.
        if not property_map:
            raise RuntimeError(
                'LanduseLookup Collection is not existed or empty!')

        n = len(property_map)
        UtilClass.rmmkdir(lookup_dir)
        for property_name in property_namelist:
            with open('%s/%s.txt' % (lookup_dir, property_name), 'w',
                      encoding='utf-8') as f:
                f.write('%d\n' % n)
                for prop_id in property_map:
                    f.write('%d %f\n' %
                            (int(property_map[prop_id]['LANDUSE_ID']),
                             property_map[prop_id][property_name]))
Example #11
0
 def ParseTimespan(self, items):
      """Parse all TIMESPAN items into `self.timespan` and return it.

      The format of self.timespan is different for OpenMP version and MPI&OpenMP version.
      For OpenMP version:
         {'IO': {'Input': 0.2,
                 'Output': 0.04
                }
          'COMP': {'TSD_RD_P': 0.0001,  # All modules
                   'ALL': 12.3
                  }
          'SIMU': {'ALL': 14.1}
         }
      For MPI&OpenMP version:
         {'MAX': {'IO': {'Input': 0.1,
                         'Output': 0.02,
                         'ALL': 0.12
                        }
                  'COMP': {'Slope': 5,
                           'Channel': 0.5,
                           'Barrier': 0.1,
                           'ALL': 5.6
                          }
                  'SIMU': {'ALL': 10.1}
                 }
          'MIN': {...}
          'AVG': {...}
         }
      """
      for item in items:
          if 'TIMESPAN' not in item:
              continue
          item = item.split('\n')[0]
          values = StringClass.extract_numeric_values_from_string(item)
          if values is None or len(values) != 1:
              continue
          time = values[0]
          titles = item.replace('[', '').split(']')[:-1]  # e.g., 'TIMESPAN', 'COMP', 'ALL'
          if len(titles) < 3:  # e.g., 'TIMESPAN', 'MAX', 'COMP', 'ALL'
              continue
          titles = [title.strip() for title in titles]
          self.timespan.setdefault(titles[1], dict())
          if len(titles) > 3:
              self.timespan[titles[1]].setdefault(titles[2], dict())
              self.timespan[titles[1]][titles[2]].setdefault(titles[3], time)
          else:
              self.timespan[titles[1]].setdefault(titles[2], time)
      # CONSISTENCY FIX: return the parsed dict like the documented contract
      # of its sibling implementation; previously nothing was returned.
      return self.timespan
Example #12
0
def read_field_arrays_from_csv(csvf):
    """Read per-field numeric arrays from a CSV file and combine multi-layers."""
    data_items = read_data_items_from_txt(csvf)
    if len(data_items) < 2:
        return
    flds = data_items[0]
    flds_array = dict()
    for row in data_items[1:]:  # skip the header row
        row_str = ','.join(row)
        row_values = StringClass.extract_numeric_values_from_string(row_str)
        for fld_idx, fld_name in enumerate(flds):
            if fld_idx == 0 or StringClass.string_match(fld_name, 'FID'):
                continue  # ignore the first column and any FID column
            flds_array.setdefault(fld_name, list()).append(row_values[fld_idx])
    return combine_multi_layers_array(flds_array)
def read_hypervolume(hypervlog):
    # type: (AnyStr) -> (List[int], List[float], List[float])
    """Read hypervolume data from file.

    Args:
        hypervlog: Full path of the hypervolume log file.

    Returns:
        x: Generation numbers.
        hyperv: Hypervolume values.
        nmodel: Newly executed model counts (empty if the log lacks them).
        All three are None if the log file does not exist.
    """
    if not os.path.exists(hypervlog):
        return None, None, None
    x = list()  # Generation No.
    nmodel = list()  # Newly executed models count
    hyperv = list()  # Hypervolume value
    # CONSISTENCY FIX: read as UTF-8 text like the other log readers.
    with open(hypervlog, 'r', encoding='utf-8') as f:
        lines = f.readlines()
    for line in lines:
        values = StringClass.extract_numeric_values_from_string(line)
        if values is None:
            continue
        if len(values) < 2:
            continue
        x.append(int(values[0]))
        hyperv.append(values[-1])
        if len(values) >= 3:
            nmodel.append(int(values[1]))
    return x, hyperv, nmodel
Example #14
0
def read_field_arrays_from_csv(csvf):
    """Collect numeric columns of a CSV file into per-field arrays, then combine layers."""
    items = read_data_items_from_txt(csvf)
    if len(items) < 2:
        return
    header = items[0]
    field_arrays = dict()
    for record in items[1:]:  # first item is the header
        numbers = StringClass.extract_numeric_values_from_string(','.join(record))
        for col_idx, col_name in enumerate(header):
            if col_idx == 0 or StringClass.string_match(col_name, 'FID'):
                continue  # skip the leading column and any FID column
            if col_name not in field_arrays:
                field_arrays[col_name] = list()
            field_arrays[col_name].append(numbers[col_idx])
    return combine_multi_layers_array(field_arrays)
Example #15
0
def main():
    """Update the operation schedules of AGRL *.mgt files in a SWAT TxtInOut directory."""
    txtInOutPath = r'C:\z_data\zhongTianShe\model_data_swat\TxtInOut'
    plt_mgt_op = r'C:\z_data\zhongTianShe\model_data_swat\TxtInOut\opSchedules.txt'

    # In SWAT source readmgt.f, the format of one operation item is:
    # 5200 format (1x,i2,1x,i2,1x,f8.3,1x,i2,1x,i4,1x,i3,1x,i2,1x,f12.5,1x,
    #     &        f6.2,1x,f11.5,1x,f4.2,1x,f6.2,1x,f5.2,i12)
    # which transformed to Python is (totally 92 characters):
    line_fmt = ' %2d %2d %8.3f %2d %4d %3d %2d %12.5f %6.2f %11.5f %4.2f %6.2f %5.2f%12d\n'
    op_schedules = list()
    # BUGFIX: use context managers so file handles are always closed,
    # even if an exception is raised while reading/writing.
    with open(plt_mgt_op, 'r') as f:
        op_lines = f.readlines()
    for i in range(1, len(op_lines)):
        v = StringClass.extract_numeric_values_from_string(op_lines[i].strip())
        if len(v) < 14:
            break  # stop at the first incomplete schedule line
        op_schedules.append(line_fmt % tuple(v[:14]))

    mgt_files = FileClass.get_filename_by_suffixes(txtInOutPath, ['.mgt'])
    for mgt_file in mgt_files:
        with open(txtInOutPath + os.sep + mgt_file, 'r') as f:
            fcont = f.readlines()
        if 'Luse:AGRL' not in fcont[0].rstrip():
            continue  # only update agricultural landuse files
        print('update %s...' % mgt_file)
        # Keep everything up to and including the 'Operation Schedule' line.
        fcont_new = list()
        for line in fcont:
            fcont_new.append(line)
            if 'Operation Schedule' in line.rstrip():
                break
        # Write new Operation Schedule
        fcont_new += op_schedules
        with open(txtInOutPath + os.sep + mgt_file, 'w') as f:
            f.writelines(fcont_new)
Example #16
0
def read_hypervolume(hypervlog):
    # type: (AnyStr) -> (List[int], List[float], List[float])
    """Read hypervolume data from file."""
    if not os.path.exists(hypervlog):
        print('Error: The hypervolume log file %s is not existed!' % hypervlog)
        return None, None, None
    gen_ids = list()  # Generation No.
    new_models = list()  # Newly executed models count
    volumes = list()  # Hypervolume value
    with open(hypervlog, 'r', encoding='utf-8') as f:
        all_lines = f.readlines()
    for cur_line in all_lines:
        nums = StringClass.extract_numeric_values_from_string(cur_line)
        if nums is None or len(nums) < 2:
            continue  # skip blank/non-data lines
        gen_ids.append(int(nums[0]))
        volumes.append(nums[-1])
        if len(nums) >= 3:
            new_models.append(int(nums[1]))
    return gen_ids, volumes, new_models
Example #17
0
def main():
    """TEST CODE: convert sunshine duration (SSD) records to solar radiation (SR)."""
    lat_station = 31.45
    ssd_txt = r'C:\z_data\zhongTianShe\model_data_swat\climate\ssd_LY.txt'
    sr_txt = r'C:\z_data\zhongTianShe\model_data_swat\climate\sr_LY.txt'
    sr = list()
    # BUGFIX: use context managers so file handles are always closed,
    # even if an exception is raised while reading/writing.
    with open(ssd_txt, 'r') as f:
        ssd_items = f.readlines()
    # First line is the start datetime; subsequent lines hold SSD values.
    st_str = ssd_items[0].strip()
    st_time = StringClass.get_datetime(st_str)
    for i in range(1, len(ssd_items)):
        ssd_tmp = StringClass.extract_numeric_values_from_string(ssd_items[i])
        time_tmp = st_time + timedelta(days=i - 1)
        # One single-element list per SSD value, so each computed SR value
        # is written on its own line below.
        sr.extend([round(rs(DateClass.day_of_year(time_tmp), v, lat_station * PI / 180.), 1)]
                  for v in ssd_tmp)
    with open(sr_txt, 'w') as f:
        f.write(st_str + '\n')
        for sr_tmp in sr:
            f.write(','.join(str(v) for v in sr_tmp) + '\n')
Example #18
0
    def export_landuse_lookup_files_from_mongodb(cfg, maindb):
        """Export landuse lookup tables to txt file from MongoDB.

        Args:
            cfg: Configuration object providing the `dirs.lookup` output directory.
            maindb: Main database holding the 'LANDUSELOOKUP' collection.

        Raises:
            RuntimeError: If the 'LANDUSELOOKUP' collection is empty.
        """
        lookup_dir = cfg.dirs.lookup
        # BUGFIX: copy the shared field list. Appending to
        # `ModelParamDataUtils.landuse_fields` directly would grow the
        # class-level list by one 'USLE_P' entry on every call.
        property_namelist = ModelParamDataUtils.landuse_fields[:]
        property_namelist.append('USLE_P')
        property_map = dict()
        query_result = maindb['LANDUSELOOKUP'].find()
        count = 0
        for row in query_result:
            value_map = dict()
            for p_name in property_namelist:
                if StringClass.string_match(p_name, "USLE_P"):
                    # Currently, USLE_P is set as 1 for all landuse.
                    value_map[p_name] = 1
                else:
                    v = row.get(p_name)
                    # BUGFIX: `unicode` is undefined on Python 3 and raised
                    # NameError; the literal types below exist on both
                    # Python 2 (str/unicode) and Python 3 (both are str).
                    if isinstance(v, (type(''), type(u''))):
                        v = StringClass.extract_numeric_values_from_string(v)[0]
                    value_map[p_name] = v
            count += 1
            property_map[count] = value_map
        # BUGFIX: `find()` returns a cursor and never None, so emptiness must
        # be checked after iterating, not by comparing the cursor with None.
        if not property_map:
            raise RuntimeError('LanduseLoop Collection is not existed or empty!')

        n = len(property_map)
        UtilClass.rmmkdir(lookup_dir)
        for propertyName in property_namelist:
            with open("%s/%s.txt" % (lookup_dir, propertyName,), 'w') as f:
                f.write("%d\n" % n)
                for prop_id in property_map:
                    s = "%d %f\n" % (int(property_map[prop_id]['LANDUSE_ID']),
                                     property_map[prop_id][propertyName])
                    f.write(s)
Example #19
0
 def read_optiontyploc_section(self, _opttyploc):
     """Optional parameter-settings for Typical Locations selection"""
     if _opttyploc not in self.cf.sections():
         return
     # handling slope position types and tags
     if self.cf.has_option(_opttyploc, 'slopepositiontypes'):
         self.slppostype = list()
         typstrs = self.cf.get(_opttyploc, 'slopepositiontypes')
         self.slppostype = StringClass.split_string(typstrs.lower(), ',')
     else:
         # five slope position system will be adapted.
         pass
     if self.cf.has_option(_opttyploc, 'slopepositiontags'):
         self.slppostag = list()
         tagstrs = self.cf.get(_opttyploc, 'slopepositiontags')
         self.slppostag = StringClass.extract_numeric_values_from_string(
             tagstrs)
         if len(self.slppostag) != len(self.slppostype):
             raise RuntimeError(
                 "The input number of slope position types and "
                 "tags are not the same!")
     else:
         self.slppostag = list()
         for i in range(len(self.slppostype)):
             self.slppostag.append(pow(2, i))
     for typ in self.slppostype:
         self.singleslpposconf[typ] = SingleSlpPosFiles(self.ws, typ)
     # handling selected topographic attributes
     if self.cf.has_option(_opttyploc, 'terrainattrdict'):
         self.selectedtopolist = list()
         self.selectedtopo = dict()
         terrain_attr_dict_str = self.cf.get(_opttyploc, 'terrainattrdict')
         attrpath_strs = StringClass.split_string(terrain_attr_dict_str,
                                                  ';')
         for i, singattr in enumerate(attrpath_strs):
             ap = StringClass.split_string(singattr, ',')
             attrname = ap[0].lower()
             if i == 0 and not StringClass.string_match(attrname, 'rpi'):
                 attrname = 'rpi'
             self.selectedtopolist.append(attrname)
             attrpath = self.topoparam.get_attr_file(attrname)
             if attrpath is not None:
                 self.selectedtopo[attrname] = attrpath
             else:  # this should be user-defined attribute, and should has a valid file path
                 if len(ap) != 2:
                     raise RuntimeError(
                         "User defined topographic attribute (%s) MUST have "
                         "an existed file path!" % singattr)
                 attrp = AutoFuzSlpPosConfig.check_file_available(ap[1])
                 if attrp is None:
                     raise RuntimeError(
                         "User defined topographic attribute (%s) MUST have "
                         "an existed file path!" % singattr)
                 self.selectedtopo[attrname] = attrp
                 is_regional = False
                 if i == 0:  # the first one is regional attribute
                     is_regional = True
                 self.topoparam.add_user_defined_attribute(
                     attrname, attrp, is_regional)
     # handling several parameters used in extracting typical location
     if self.cf.has_option(_opttyploc, 'typlocextractparam'):
         self.param4typloc = dict()
         base_param_str = self.cf.get(_opttyploc, 'typlocextractparam')
         base_param_floats = StringClass.extract_numeric_values_from_string(
             base_param_str)
         defnum = len(self._DEFAULT_PARAM_TYPLOC)
         if len(base_param_floats) == defnum:
             for slppos in self.slppostype:
                 self.param4typloc[slppos] = base_param_floats[:]
         elif len(base_param_floats) == len(self.slppostype) * defnum:
             for i, slppos in enumerate(self.slppostype):
                 self.param4typloc[slppos] = base_param_floats[i *
                                                               defnum:(i +
                                                                       1) *
                                                               defnum]
         else:
             raise RuntimeError("TyplocExtractParam MUST has the number of "
                                "%d or %d!" %
                                (defnum, len(self.slppostype) * defnum))
     else:
         for slppos in self.slppostype:
             self.param4typloc[slppos] = self._DEFAULT_PARAM_TYPLOC[:]
     # handling Pre-defined fuzzy membership function shapes of each terrain attribute
     #    for each slope position
     if self.cf.has_option(_opttyploc, 'fuzinfdefault'):
         self.infshape = dict()
         fuz_inf_shp_strs = self.cf.get(_opttyploc, 'fuzinfdefault')
         # inference shapes are separated by SIMICOLON bewteen slope positions
         fuz_inf_shp_types = StringClass.split_string(fuz_inf_shp_strs, ';')
         if len(fuz_inf_shp_types) != len(self.slppostype):
             raise RuntimeError(
                 "FuzInfDefault (%s) MUST be consistent with slope position types"
                 " and separated by ';'!" % fuz_inf_shp_strs)
         for i, slppos in enumerate(self.slppostype):
             self.infshape[slppos] = dict()
              # inference shapes are separated by COMMA between topographic attributes
             infshps = StringClass.split_string(fuz_inf_shp_types[i], ',')
             if len(infshps) != len(self.selectedtopolist):
                 raise RuntimeError(
                     "FuzInfDefault (%s) for each slope position MUST have "
                     "the same size with TerrainAttrDict" %
                     fuz_inf_shp_types[i])
             for j, attrn in enumerate(self.selectedtopolist):
                 self.infshape[slppos][attrn] = infshps[j]
     else:
         if len(self.slppostype) != 5:
             raise RuntimeError(
                 "Only the fuzzy membership function shapes of "
                 "5 slope position system are built-in. For other "
                 "classification system, please set as input!")
     # handling value ranges of terrain attributes for extracting prototypes
     if self.cf.has_option(_opttyploc, 'valueranges'):
         self.extractrange = dict()
         value_rng_strs = self.cf.get(_opttyploc, 'valueranges')
         value_rng_types = StringClass.split_string(value_rng_strs, ';')
         if len(value_rng_types) != len(self.slppostype):
             raise RuntimeError(
                 "ValueRanges (%s) MUST be consistent with slope position types"
                 " and separated by ';'!" % value_rng_strs)
         for i, slppos in enumerate(self.slppostype):
             self.extractrange[slppos] = dict()
             value_rngs = StringClass.extract_numeric_values_from_string(
                 value_rng_types[i])
             if len(value_rngs) == 0 or len(value_rngs) % 3 != 0:
                 raise RuntimeError(
                     "Each item of ValueRanges MUST contains three elements,"
                     "i.e., Attributes No., Min, Max! Please check item: "
                     "%s for %s." % (value_rng_types[i], slppos))
             for j in range(int(len(value_rngs) / 3)):
                 attridx = int(value_rngs[j * 3]) - 1
                 attrname = self.selectedtopolist[attridx]
                 min_v = value_rngs[j * 3 + 1]
                 max_v = value_rngs[j * 3 + 2]
                 self.extractrange[slppos][attrname] = [min_v, max_v]
     else:
         if len(self.slppostype) != 5:
             raise RuntimeError(
                 "Only the extract value ranges of "
                 "5 slope position system are built-in. For other "
                 "classification system, please set as input!")
Example #20
0
    def __init__(self, cf, method='nsga2'):
        # type: (ConfigParser, str) -> None
        """Initialize scenario analysis configurations from the *.ini parser.

        Args:
            cf: ConfigParser object of the *.ini configuration file.
            method: Optimization method name. Currently only 'nsga2' is supported.

        Raises:
            ValueError: If a required section/value is missing or illegal, or
                if `method` is not a supported optimization method.
        """
        # 1. SEIMS model related
        self.model = ParseSEIMSConfig(cf)  # type: ParseSEIMSConfig

        # 2. Common settings of BMPs scenario
        self.eval_stime = None  # type: Optional[datetime]
        self.eval_etime = None  # type: Optional[datetime]
        self.worst_econ = 0.
        self.worst_env = 0.
        self.runtime_years = 0.
        self.export_sce_txt = False
        self.export_sce_tif = False
        if 'Scenario_Common' not in cf.sections():
            raise ValueError(
                '[Scenario_Common] section MUST be existed in *.ini file.')
        self.eval_stime = parse_datetime_from_ini(cf, 'Scenario_Common',
                                                  'eval_time_start')
        self.eval_etime = parse_datetime_from_ini(cf, 'Scenario_Common',
                                                  'eval_time_end')
        self.worst_econ = cf.getfloat('Scenario_Common', 'worst_economy')
        self.worst_env = cf.getfloat('Scenario_Common', 'worst_environment')
        self.runtime_years = cf.getfloat('Scenario_Common', 'runtime_years')
        if cf.has_option('Scenario_Common', 'export_scenario_txt'):
            self.export_sce_txt = cf.getboolean('Scenario_Common',
                                                'export_scenario_txt')
        if cf.has_option('Scenario_Common', 'export_scenario_tif'):
            self.export_sce_tif = cf.getboolean('Scenario_Common',
                                                'export_scenario_tif')

        # 3. Application specific setting section [BMPs]
        # Selected BMPs, the key is BMPID, and value is the BMP information dict
        self.bmps_info = dict(
        )  # type: Dict[int, Dict[AnyStr, Union[int, float, AnyStr, List[Union[int, float, AnyStr]]]]]
        # BMPs to be constant for generated scenarios during optimization, same format with bmps_info
        self.bmps_retain = dict(
        )  # type: Dict[int, Dict[AnyStr, Union[int, float, AnyStr, List[Union[int, float, AnyStr]]]]]
        self.eval_info = dict(
        )  # type: Dict[AnyStr, Union[int, float, AnyStr]]
        self.bmps_cfg_unit = 'CONNFIELD'  # type: AnyStr
        self.bmps_cfg_method = 'RAND'  # type: AnyStr
        if 'BMPs' not in cf.sections():
            raise ValueError(
                '[BMPs] section MUST be existed for specific scenario analysis.'
            )

        bmpsinfostr = cf.get('BMPs', 'bmps_info')
        self.bmps_info = UtilClass.decode_strs_in_dict(json.loads(bmpsinfostr))
        if cf.has_option('BMPs', 'bmps_retain'):
            bmpsretainstr = cf.get('BMPs', 'bmps_retain')
            self.bmps_retain = json.loads(bmpsretainstr)
            self.bmps_retain = UtilClass.decode_strs_in_dict(self.bmps_retain)
        evalinfostr = cf.get('BMPs', 'eval_info')
        self.eval_info = UtilClass.decode_strs_in_dict(json.loads(evalinfostr))
        bmpscfgunitstr = cf.get('BMPs', 'bmps_cfg_units')
        bmpscfgunitdict = UtilClass.decode_strs_in_dict(
            json.loads(bmpscfgunitstr))
        # Only the first BMPs configuration unit entry is considered (note the
        #  `break` at the end of the loop body).
        for unitname, unitcfg in viewitems(bmpscfgunitdict):
            self.bmps_cfg_unit = unitname
            if self.bmps_cfg_unit not in BMPS_CFG_UNITS:
                raise ValueError('BMPs configuration unit MUST be '
                                 'one of %s' % BMPS_CFG_UNITS.__str__())
            if not isinstance(unitcfg, dict):
                raise ValueError(
                    'The value of BMPs configuration unit MUST be dict value!')
            # Propagate unit-level settings to each BMP that does not
            #  already define them.
            for cfgname, cfgvalue in viewitems(unitcfg):
                for bmpid, bmpdict in viewitems(self.bmps_info):
                    if cfgname in bmpdict:
                        continue
                    self.bmps_info[bmpid][cfgname] = cfgvalue
            break

        if cf.has_option('BMPs', 'bmps_cfg_method'):
            self.bmps_cfg_method = cf.get('BMPs', 'bmps_cfg_method')
            if self.bmps_cfg_method not in BMPS_CFG_METHODS:
                print('BMPs configuration method MUST be one of %s' %
                      BMPS_CFG_METHODS.__str__())
                self.bmps_cfg_method = 'RAND'

        # Check the validation of configuration unit and method
        if self.bmps_cfg_method not in BMPS_CFG_PAIR.get(self.bmps_cfg_unit):
            raise ValueError('BMPs configuration method %s '
                             'is not supported on unit %s' %
                             (self.bmps_cfg_method, self.bmps_cfg_unit))

        # Optimize boundary of BMP configuration unit
        self.boundary_adaptive = False
        self.boundary_adaptive_threshs = None
        if cf.has_option('BMPs', 'bmps_cfg_units_opt'):
            self.boundary_adaptive = cf.getboolean('BMPs',
                                                   'bmps_cfg_units_opt')
        if cf.has_option('BMPs', 'boundary_adaptive_threshold'):
            tstr = cf.get('BMPs', 'boundary_adaptive_threshold')
            self.boundary_adaptive_threshs = StringClass.extract_numeric_values_from_string(
                tstr)
            if 0 not in self.boundary_adaptive_threshs:
                self.boundary_adaptive_threshs.append(
                    0)  # 0 means no adjustment of boundary
            # Iterate over a snapshot copy because negative counterparts are
            #  appended to the list inside the loop.
            for tmp_thresh in self.boundary_adaptive_threshs[:]:
                if -1 * tmp_thresh not in self.boundary_adaptive_threshs:
                    self.boundary_adaptive_threshs.append(-1 * tmp_thresh)

        # 4. Parameters settings for specific optimization algorithm
        self.opt_mtd = method
        self.opt = None  # type: Union[ParseNSGA2Config, None]
        if self.opt_mtd == 'nsga2':
            self.opt = ParseNSGA2Config(
                cf, self.model.model_dir,
                'SA_NSGA2_%s_%s' % (self.bmps_cfg_unit, self.bmps_cfg_method))
        if self.opt is None:
            # Fail fast with a clear message instead of an AttributeError when
            #  self.opt.out_dir is accessed below.
            raise ValueError('The optimization method %s is not supported '
                             'currently!' % self.opt_mtd)
        # Use the existing population derived from previous scenario optimization
        self.initial_byinput = cf.getboolean(self.opt_mtd.upper(), 'inputpopulation') if \
            cf.has_option(self.opt_mtd.upper(), 'inputpopulation') else False
        self.input_pareto_file = None
        self.input_pareto_gen = -1
        if cf.has_option(self.opt_mtd.upper(), 'paretofrontsfile'):
            self.input_pareto_file = cf.get(self.opt_mtd.upper(),
                                            'paretofrontsfile')
        if cf.has_option(self.opt_mtd.upper(), 'generationselected'):
            self.input_pareto_gen = cf.getint(self.opt_mtd.upper(),
                                              'generationselected')

        self.scenario_dir = self.opt.out_dir + os.path.sep + 'Scenarios'
        UtilClass.rmmkdir(self.scenario_dir)

        # 5. (Optional) Plot settings for matplotlib
        self.plot_cfg = PlotConfig(cf)
Example #21
0
    def __init__(self, cf):
        """Initialize preprocessing configurations from the *.ini parser.

        Args:
            cf: ConfigParser object of the *.ini configuration file.

        Raises:
            ValueError: If a required section or value is missing/illegal.
            IOError: If required directories do not exist.
        """
        # 1. Directories
        self.base_dir = None
        self.clim_dir = None
        self.spatial_dir = None
        self.observe_dir = None
        self.scenario_dir = None
        self.model_dir = None
        self.txt_db_dir = None
        self.preproc_script_dir = None
        self.seims_bin = None
        self.mpi_bin = None
        self.workspace = None
        # 1.1. Directory determined flags
        self.use_observed = True
        self.use_scernario = True
        # 2. MongoDB configuration and database, collation, GridFS names
        self.hostname = '127.0.0.1'  # localhost by default
        self.port = 27017
        self.climate_db = ''
        self.bmp_scenario_db = ''
        self.spatial_db = ''
        # 3. Climate inputs
        self.hydro_climate_vars = None
        self.prec_sites = None
        self.prec_data = None
        self.Meteo_sites = None
        self.Meteo_data = None
        self.thiessen_field = 'ID'
        # 4. Spatial inputs
        self.prec_sites_thiessen = None
        self.meteo_sites_thiessen = None
        self.dem = None
        self.outlet_file = None
        self.landuse = None
        self.landcover_init_param = None
        self.soil = None
        self.soil_property = None
        self.fields_partition = False
        self.fields_partition_thresh = list()
        self.additional_rs = dict()
        # 5. Option parameters
        self.d8acc_threshold = 0
        self.np = 4
        self.d8down_method = 's'
        self.dorm_hr = -1.
        self.temp_base = 0.
        self.imper_perc_in_urban = 0.
        self.default_landuse = -1
        self.default_soil = -1
        # 1. Directories
        if 'PATH' in cf.sections():
            self.base_dir = cf.get('PATH', 'base_data_dir')
            self.clim_dir = cf.get('PATH', 'climate_data_dir')
            self.spatial_dir = cf.get('PATH', 'spatial_data_dir')
            self.observe_dir = cf.get('PATH', 'measurement_data_dir')
            self.scenario_dir = cf.get('PATH', 'bmp_data_dir')
            self.model_dir = cf.get('PATH', 'model_dir')
            self.txt_db_dir = cf.get('PATH', 'txt_db_dir')
            self.preproc_script_dir = cf.get('PATH', 'preproc_script_dir')
            self.seims_bin = cf.get('PATH', 'cpp_program_dir')
            self.mpi_bin = cf.get('PATH', 'mpiexec_dir')
            self.workspace = cf.get('PATH', 'working_dir')
        else:
            raise ValueError('[PATH] section MUST be existed in *.ini file.')
        if not (FileClass.is_dir_exists(self.base_dir)
                and FileClass.is_dir_exists(self.model_dir)
                and FileClass.is_dir_exists(self.txt_db_dir)
                and FileClass.is_dir_exists(self.preproc_script_dir)
                and FileClass.is_dir_exists(self.seims_bin)):
            raise IOError(
                'Please Check Directories defined in [PATH]. '
                'BASE_DATA_DIR, MODEL_DIR, TXT_DB_DIR, PREPROC_SCRIPT_DIR, '
                'and CPP_PROGRAM_DIR are required!')
        if not FileClass.is_dir_exists(self.mpi_bin):
            self.mpi_bin = None  # MPI is optional
        if not FileClass.is_dir_exists(self.workspace):
            try:  # first try to make dirs
                UtilClass.mkdir(self.workspace)
                # os.mkdir(self.workspace)
            except OSError as exc:
                # Fall back to a default workspace under the model directory,
                #  and report the reason of the failure.
                self.workspace = self.model_dir + os.path.sep + 'preprocess_output'
                print('WARNING: Make WORKING_DIR failed: %s. '
                      'Use the default: %s' % (exc, self.workspace))
                if not os.path.exists(self.workspace):
                    UtilClass.mkdir(self.workspace)

        self.dirs = DirNameUtils(self.workspace)
        self.logs = LogNameUtils(self.dirs.log)
        self.vecs = VectorNameUtils(self.dirs.geoshp)
        self.taudems = TauDEMFilesUtils(self.dirs.taudem)
        self.spatials = SpatialNamesUtils(self.dirs.geodata2db)
        self.modelcfgs = ModelCfgUtils(self.model_dir)
        self.paramcfgs = ModelParamDataUtils(self.preproc_script_dir +
                                             os.path.sep + 'database')

        if not FileClass.is_dir_exists(self.clim_dir):
            print(
                'The CLIMATE_DATA_DIR is not existed, try the default folder name "climate".'
            )
            self.clim_dir = self.base_dir + os.path.sep + 'climate'
            if not FileClass.is_dir_exists(self.clim_dir):
                raise IOError(
                    'Directories named "climate" MUST BE located in [base_dir]!'
                )

        if not FileClass.is_dir_exists(self.spatial_dir):
            print(
                'The SPATIAL_DATA_DIR is not existed, try the default folder name "spatial".'
            )
            self.spatial_dir = self.base_dir + os.path.sep + 'spatial'
            # Only raise if the default fallback also does not exist,
            #  mirroring the CLIMATE_DATA_DIR handling above.
            if not FileClass.is_dir_exists(self.spatial_dir):
                raise IOError(
                    'Directories named "spatial" MUST BE located in [base_dir]!')

        if not FileClass.is_dir_exists(self.observe_dir):
            self.observe_dir = None
            self.use_observed = False

        if not FileClass.is_dir_exists(self.scenario_dir):
            self.scenario_dir = None
            self.use_scernario = False

        # 2. MongoDB related
        if 'MONGODB' in cf.sections():
            self.hostname = cf.get('MONGODB', 'hostname')
            self.port = cf.getint('MONGODB', 'port')
            self.climate_db = cf.get('MONGODB', 'climatedbname')
            self.bmp_scenario_db = cf.get('MONGODB', 'bmpscenariodbname')
            self.spatial_db = cf.get('MONGODB', 'spatialdbname')
        else:
            raise ValueError(
                '[MONGODB] section MUST be existed in *.ini file.')
        if not StringClass.is_valid_ip_addr(self.hostname):
            raise ValueError('HOSTNAME illegal defined in [MONGODB]!')

        # 3. Climate Input
        if 'CLIMATE' in cf.sections():
            self.hydro_climate_vars = self.clim_dir + os.path.sep + cf.get(
                'CLIMATE', 'hydroclimatevarfile')
            self.prec_sites = self.clim_dir + os.path.sep + cf.get(
                'CLIMATE', 'precsitefile')
            self.prec_data = self.clim_dir + os.path.sep + cf.get(
                'CLIMATE', 'precdatafile')
            self.Meteo_sites = self.clim_dir + os.path.sep + cf.get(
                'CLIMATE', 'meteositefile')
            self.Meteo_data = self.clim_dir + os.path.sep + cf.get(
                'CLIMATE', 'meteodatafile')
            self.thiessen_field = cf.get('CLIMATE', 'thiessenidfield')
        else:
            raise ValueError(
                'Climate input file names MUST be provided in [CLIMATE]!')

        # 4. Spatial Input
        if 'SPATIAL' in cf.sections():
            self.prec_sites_thiessen = self.spatial_dir + os.path.sep + cf.get(
                'SPATIAL', 'precsitesthiessen')
            self.meteo_sites_thiessen = self.spatial_dir + os.path.sep + cf.get(
                'SPATIAL', 'meteositesthiessen')
            self.dem = self.spatial_dir + os.path.sep + cf.get(
                'SPATIAL', 'dem')
            self.outlet_file = self.spatial_dir + os.path.sep + cf.get(
                'SPATIAL', 'outlet_file')
            if not os.path.exists(self.outlet_file):
                self.outlet_file = None  # outlet is optional
            self.landuse = self.spatial_dir + os.path.sep + cf.get(
                'SPATIAL', 'landusefile')
            self.landcover_init_param = self.txt_db_dir + os.path.sep + cf.get(
                'SPATIAL', 'landcoverinitfile')
            self.soil = self.spatial_dir + os.path.sep + cf.get(
                'SPATIAL', 'soilseqnfile')
            self.soil_property = self.txt_db_dir + os.path.sep + cf.get(
                'SPATIAL', 'soilseqntext')
            if cf.has_option('SPATIAL', 'additionalfile'):
                additional_dict_str = cf.get('SPATIAL', 'additionalfile')
                tmpdict = json.loads(additional_dict_str)
                tmpdict = {
                    str(k): (str(v) if is_string(v) else v)
                    for k, v in list(tmpdict.items())
                }
                for k, v in list(tmpdict.items()):
                    # Existence check has been moved to mask_origin_delineated_data()
                    #  in sp_delineation.py
                    self.additional_rs[k] = v
            # Field partition
            if cf.has_option('SPATIAL', 'field_partition_thresh'):
                ths = cf.get('SPATIAL', 'field_partition_thresh')
                thsv = StringClass.extract_numeric_values_from_string(ths)
                if thsv is not None:
                    self.fields_partition_thresh = [int(v) for v in thsv]
                    self.fields_partition = True
        else:
            raise ValueError(
                'Spatial input file names MUST be provided in [SPATIAL]!')

        # 5. Optional parameters
        if 'OPTIONAL_PARAMETERS' in cf.sections():
            self.d8acc_threshold = cf.getfloat('OPTIONAL_PARAMETERS',
                                               'd8accthreshold')
            self.np = cf.getint('OPTIONAL_PARAMETERS', 'np')
            self.d8down_method = cf.get('OPTIONAL_PARAMETERS', 'd8downmethod')
            # Normalize the D8 downslope method to a single-letter code.
            if StringClass.string_match(self.d8down_method, 'surface'):
                self.d8down_method = 's'
            elif StringClass.string_match(self.d8down_method, 'horizontal'):
                self.d8down_method = 'h'
            elif StringClass.string_match(self.d8down_method, 'pythagoras'):
                self.d8down_method = 'p'
            elif StringClass.string_match(self.d8down_method, 'vertical'):
                self.d8down_method = 'v'
            else:
                self.d8down_method = self.d8down_method.lower()
                if self.d8down_method not in ['s', 'h', 'p', 'v']:
                    self.d8down_method = 's'
            self.dorm_hr = cf.getfloat('OPTIONAL_PARAMETERS', 'dorm_hr')
            self.temp_base = cf.getfloat('OPTIONAL_PARAMETERS', 't_base')
            self.imper_perc_in_urban = cf.getfloat(
                'OPTIONAL_PARAMETERS', 'imperviouspercinurbancell')
            self.default_landuse = cf.getint('OPTIONAL_PARAMETERS',
                                             'defaultlanduse')
            self.default_soil = cf.getint('OPTIONAL_PARAMETERS', 'defaultsoil')
Example #22
0
    def __init__(self, cf):
        """Initialize preprocessing configurations from the *.ini parser.

        Args:
            cf: ConfigParser object of the *.ini configuration file.

        Raises:
            ValueError: If a required section or value is missing/illegal.
            IOError: If required directories do not exist.
        """
        # 1. Directories
        self.base_dir = None
        self.clim_dir = None
        self.spatial_dir = None
        self.observe_dir = None
        self.scenario_dir = None
        self.model_dir = None
        self.txt_db_dir = None
        self.preproc_script_dir = None
        self.seims_bin = None
        self.mpi_bin = None
        self.workspace = None
        # 1.1. Directory determined flags
        self.use_observed = True
        self.use_scernario = True
        # 2. MongoDB configuration and database, collation, GridFS names
        self.hostname = '127.0.0.1'  # localhost by default
        self.port = 27017
        self.climate_db = ''
        self.bmp_scenario_db = ''
        self.spatial_db = ''
        # 3. Switch for building SEIMS. These switches should be removed! By lj.
        # self.gen_cn = True
        # self.gen_runoff_coef = True
        # self.gen_crop = True
        # self.gen_iuh = True
        # 4. Climate inputs
        self.hydro_climate_vars = None
        self.prec_sites = None
        self.prec_data = None
        self.Meteo_sites = None
        self.Meteo_data = None
        self.thiessen_field = 'ID'
        # 5. Spatial inputs
        self.prec_sites_thiessen = None
        self.meteo_sites_thiessen = None
        self.dem = None
        self.outlet_file = None
        self.landuse = None
        self.landcover_init_param = None
        self.soil = None
        self.soil_property = None
        self.fields_partition = False
        self.fields_partition_thresh = list()
        self.additional_rs = dict()
        # 6. Option parameters
        self.d8acc_threshold = 0
        self.np = 4
        self.d8down_method = 's'
        self.dorm_hr = -1.
        self.temp_base = 0.
        self.imper_perc_in_urban = 0.
        self.default_landuse = -1
        self.default_soil = -1
        # 1. Directories
        if 'PATH' in cf.sections():
            self.base_dir = cf.get('PATH', 'base_data_dir')
            self.clim_dir = cf.get('PATH', 'climate_data_dir')
            self.spatial_dir = cf.get('PATH', 'spatial_data_dir')
            self.observe_dir = cf.get('PATH', 'measurement_data_dir')
            self.scenario_dir = cf.get('PATH', 'bmp_data_dir')
            self.model_dir = cf.get('PATH', 'model_dir')
            self.txt_db_dir = cf.get('PATH', 'txt_db_dir')
            self.preproc_script_dir = cf.get('PATH', 'preproc_script_dir')
            self.seims_bin = cf.get('PATH', 'cpp_program_dir')
            self.mpi_bin = cf.get('PATH', 'mpiexec_dir')
            self.workspace = cf.get('PATH', 'working_dir')
        else:
            raise ValueError('[PATH] section MUST be existed in *.ini file.')
        if not (FileClass.is_dir_exists(self.base_dir)
                and FileClass.is_dir_exists(self.model_dir)
                and FileClass.is_dir_exists(self.txt_db_dir)
                and FileClass.is_dir_exists(self.preproc_script_dir)
                and FileClass.is_dir_exists(self.seims_bin)):
            raise IOError('Please Check Directories defined in [PATH]. '
                          'BASE_DATA_DIR, MODEL_DIR, TXT_DB_DIR, PREPROC_SCRIPT_DIR, '
                          'and CPP_PROGRAM_DIR are required!')
        if not FileClass.is_dir_exists(self.mpi_bin):
            self.mpi_bin = None  # MPI is optional
        if not FileClass.is_dir_exists(self.workspace):
            try:  # first try to make dirs
                UtilClass.mkdir(self.workspace)
                # os.mkdir(self.workspace)
            except OSError as exc:
                # Fall back to a default workspace under the model directory.
                # NOTE: exceptions have no `.message` attribute in Python 3,
                #  use the exception object itself for formatting.
                self.workspace = self.model_dir + os.path.sep + 'preprocess_output'
                print('WARNING: Make WORKING_DIR failed: %s. '
                      'Use the default: %s' % (exc, self.workspace))
                if not os.path.exists(self.workspace):
                    UtilClass.mkdir(self.workspace)

        self.dirs = DirNameUtils(self.workspace)
        self.logs = LogNameUtils(self.dirs.log)
        self.vecs = VectorNameUtils(self.dirs.geoshp)
        self.taudems = TauDEMFilesUtils(self.dirs.taudem)
        self.spatials = SpatialNamesUtils(self.dirs.geodata2db)
        self.modelcfgs = ModelCfgUtils(self.model_dir)
        self.paramcfgs = ModelParamDataUtils(self.preproc_script_dir + os.path.sep + 'database')

        if not FileClass.is_dir_exists(self.clim_dir):
            print('The CLIMATE_DATA_DIR is not existed, try the default folder name "climate".')
            self.clim_dir = self.base_dir + os.path.sep + 'climate'
            if not FileClass.is_dir_exists(self.clim_dir):
                raise IOError('Directories named "climate" MUST BE located in [base_dir]!')

        if not FileClass.is_dir_exists(self.spatial_dir):
            print('The SPATIAL_DATA_DIR is not existed, try the default folder name "spatial".')
            self.spatial_dir = self.base_dir + os.path.sep + 'spatial'
            # Only raise if the default fallback also does not exist,
            #  mirroring the CLIMATE_DATA_DIR handling above.
            if not FileClass.is_dir_exists(self.spatial_dir):
                raise IOError('Directories named "spatial" MUST BE located in [base_dir]!')

        if not FileClass.is_dir_exists(self.observe_dir):
            self.observe_dir = None
            self.use_observed = False

        if not FileClass.is_dir_exists(self.scenario_dir):
            self.scenario_dir = None
            self.use_scernario = False

        # 2. MongoDB related
        if 'MONGODB' in cf.sections():
            self.hostname = cf.get('MONGODB', 'hostname')
            self.port = cf.getint('MONGODB', 'port')
            self.climate_db = cf.get('MONGODB', 'climatedbname')
            self.bmp_scenario_db = cf.get('MONGODB', 'bmpscenariodbname')
            self.spatial_db = cf.get('MONGODB', 'spatialdbname')
        else:
            raise ValueError('[MONGODB] section MUST be existed in *.ini file.')
        if not StringClass.is_valid_ip_addr(self.hostname):
            raise ValueError('HOSTNAME illegal defined in [MONGODB]!')

        # 3. Model related switch. The SWITCH section should be removed! By lj.
        # by default, OpenMP version and daily (longterm) mode will be built
        # if 'SWITCH' in cf.sections():
        #     self.gen_cn = cf.getboolean('SWITCH', 'gencn')
        #     self.gen_runoff_coef = cf.getboolean('SWITCH', 'genrunoffcoef')
        #     self.gen_crop = cf.getboolean('SWITCH', 'gencrop')
        #
        # if self.storm_mode:
        #     self.gen_iuh = False
        #     self.climate_db = ModelNameUtils.standardize_climate_dbname(self.climate_db)

        # 4. Climate Input
        if 'CLIMATE' in cf.sections():
            self.hydro_climate_vars = self.clim_dir + os.path.sep + cf.get('CLIMATE',
                                                                           'hydroclimatevarfile')
            self.prec_sites = self.clim_dir + os.path.sep + cf.get('CLIMATE', 'precsitefile')
            self.prec_data = self.clim_dir + os.path.sep + cf.get('CLIMATE', 'precdatafile')
            self.Meteo_sites = self.clim_dir + os.path.sep + cf.get('CLIMATE', 'meteositefile')
            self.Meteo_data = self.clim_dir + os.path.sep + cf.get('CLIMATE', 'meteodatafile')
            self.thiessen_field = cf.get('CLIMATE', 'thiessenidfield')
        else:
            raise ValueError('Climate input file names MUST be provided in [CLIMATE]!')

        # 5. Spatial Input
        if 'SPATIAL' in cf.sections():
            self.prec_sites_thiessen = self.spatial_dir + os.path.sep + cf.get('SPATIAL',
                                                                               'precsitesthiessen')
            self.meteo_sites_thiessen = self.spatial_dir + os.path.sep + cf.get('SPATIAL',
                                                                                'meteositesthiessen')
            self.dem = self.spatial_dir + os.path.sep + cf.get('SPATIAL', 'dem')
            self.outlet_file = self.spatial_dir + os.path.sep + cf.get('SPATIAL', 'outlet_file')
            if not os.path.exists(self.outlet_file):
                self.outlet_file = None  # outlet is optional
            self.landuse = self.spatial_dir + os.path.sep + cf.get('SPATIAL', 'landusefile')
            self.landcover_init_param = self.txt_db_dir + os.path.sep + cf.get('SPATIAL',
                                                                               'landcoverinitfile')
            self.soil = self.spatial_dir + os.path.sep + cf.get('SPATIAL', 'soilseqnfile')
            self.soil_property = self.txt_db_dir + os.path.sep + cf.get('SPATIAL', 'soilseqntext')
            if cf.has_option('SPATIAL', 'additionalfile'):
                additional_dict_str = cf.get('SPATIAL', 'additionalfile')
                tmpdict = json.loads(additional_dict_str)
                tmpdict = {str(k): (str(v) if isinstance(v, str) else v) for k, v in
                           list(tmpdict.items())}
                for k, v in list(tmpdict.items()):
                    # Existence check has been moved to mask_origin_delineated_data()
                    #  in sp_delineation.py
                    self.additional_rs[k] = v
            # Field partition
            if cf.has_option('SPATIAL', 'field_partition_thresh'):
                ths = cf.get('SPATIAL', 'field_partition_thresh')
                thsv = StringClass.extract_numeric_values_from_string(ths)
                if thsv is not None:
                    self.fields_partition_thresh = [int(v) for v in thsv]
                    self.fields_partition = True
        else:
            raise ValueError('Spatial input file names MUST be provided in [SPATIAL]!')

        # 6. Option parameters
        if 'OPTIONAL_PARAMETERS' in cf.sections():
            self.d8acc_threshold = cf.getfloat('OPTIONAL_PARAMETERS', 'd8accthreshold')
            self.np = cf.getint('OPTIONAL_PARAMETERS', 'np')
            self.d8down_method = cf.get('OPTIONAL_PARAMETERS', 'd8downmethod')
            # Normalize the D8 downslope method to a single-letter code.
            if StringClass.string_match(self.d8down_method, 'surface'):
                self.d8down_method = 's'
            elif StringClass.string_match(self.d8down_method, 'horizontal'):
                self.d8down_method = 'h'
            elif StringClass.string_match(self.d8down_method, 'pythagoras'):
                self.d8down_method = 'p'
            elif StringClass.string_match(self.d8down_method, 'vertical'):
                self.d8down_method = 'v'
            else:
                self.d8down_method = self.d8down_method.lower()
                if self.d8down_method not in ['s', 'h', 'p', 'v']:
                    self.d8down_method = 'h'
            self.dorm_hr = cf.getfloat('OPTIONAL_PARAMETERS', 'dorm_hr')
            self.temp_base = cf.getfloat('OPTIONAL_PARAMETERS', 't_base')
            self.imper_perc_in_urban = cf.getfloat('OPTIONAL_PARAMETERS',
                                                   'imperviouspercinurbancell')
            self.default_landuse = cf.getint('OPTIONAL_PARAMETERS', 'defaultlanduse')
            self.default_soil = cf.getint('OPTIONAL_PARAMETERS', 'defaultsoil')
Example #23
0
def read_pareto_points_from_txt(txt_file, sce_name, headers, labels=None):
    # type: (AnyStr, AnyStr, List[AnyStr], Optional[List[AnyStr]]) -> (Dict[int, Union[List, numpy.ndarray]], Dict[int, int])
    """Read Pareto points from `runtime.log` file.

    Args:
        txt_file: Full file path of `runtime.log` output by NSGA2 algorithm.
        sce_name: Field name followed by `generation`, e.g., 'calibrationID', 'scenarioID', etc.
        headers: Field names in header for each dimension of Pareto front
        labels: (Optional) Labels corresponding to `headers` for Pareto graphs

    Returns:
        pareto_points: `OrderedDict`, key is generation ID, value is Pareto front array
        pareto_popnum: `OrderedDict`, key is generation ID, value is newly model runs number
    """
    with open(txt_file, 'r', encoding='utf-8') as f:
        lines = f.readlines()
    pareto_points = OrderedDict()
    pareto_popnum = OrderedDict()
    found = False
    cur_gen = -1
    iden_idx = -1  # column index of the scenario/calibration identifier field

    # Uppercase copies for case-insensitive matching against header lines
    new_headers = [hd.upper() for hd in headers]
    if labels is None:
        labels = headers[:]

    headers_idx = list()
    new_labels = list()

    for lno, line in enumerate(lines):
        # Strip any known line-ending sequence from the raw line
        str_line = line
        for LF in LFs:
            if LF in line:
                str_line = line.split(LF)[0]
                break
        if str_line == '':
            continue
        values = StringClass.extract_numeric_values_from_string(str_line)
        # Check generation marker, e.g., ###### Generation: 23 ######
        if str_line[0] == '#' and 'Generation' in str_line:
            # BUGFIX: `values` is None when the line holds no number, which
            # previously raised TypeError on len(None); skip such lines.
            if values is None or len(values) != 1:
                continue
            gen = int(values[0])
            found = True
            cur_gen = gen
            pareto_popnum[cur_gen] = list()
            pareto_points[cur_gen] = list()
            continue
        if not found:  # If the first "###### Generation: 1 ######" has not been found.
            continue
        line_list = StringClass.split_string(str_line.upper(), ['\t'])
        if values is None:  # means header line
            if headers_idx and new_labels:
                # Header already resolved in a previous generation block
                continue
            for idx, v in enumerate(line_list):
                if sce_name.upper() in v.upper():
                    iden_idx = idx
                    break
            for fldno, fld in enumerate(new_headers):
                if fld in line_list:
                    tmpidx = line_list.index(fld)
                    headers_idx.append(tmpidx)
                    new_labels.append(labels[fldno])
            continue
        if iden_idx < 0:
            continue
        # now append the real Pareto front point data
        tmpvalues = list()
        for tmpidx in headers_idx:
            tmpvalues.append(StringClass.extract_numeric_values_from_string(line_list[tmpidx])[0])
        pareto_points[cur_gen].append(tmpvalues[:])
        iden_str = line_list[iden_idx]  # e.g., 1-44
        iden_strs = iden_str.split('-')
        if len(iden_strs) == 1:
            pareto_popnum[cur_gen].append(int(iden_strs[0]))
        if len(iden_strs) == 2:
            # 'gen-run' form: record the run number under its own generation
            pareto_popnum.setdefault(int(iden_strs[0]), list())
            pareto_popnum[int(iden_strs[0])].append(int(iden_strs[1]))

    return pareto_points, pareto_popnum
def plot_hypervolume(method_paths, ws, cn=False):
    """Plot hypervolume of multiple optimization methods.

    Args:
        method_paths: Dict, key is method name and value is full path of the directory
        ws: Full path of the destination directory
        cn: (Optional) Use Chinese labels for axis titles
    """
    # Collect (generation, hypervolume) series per method from hypervolume.txt
    hyperv = OrderedDict()
    for k, v in list(method_paths.items()):
        hv_file = os.path.join(v, 'hypervolume.txt')  # robust path join
        x = list()
        y = list()
        # BUGFIX: open with explicit encoding to avoid locale-dependent decoding
        with open(hv_file, 'r', encoding='utf-8') as f:
            lines = f.readlines()
        for line in lines:
            values = StringClass.extract_numeric_values_from_string(line)
            if values is None or len(values) < 2:
                continue
            x.append(int(values[0]))
            y.append(values[-1])

        if len(x) == len(y) > 0:
            hyperv[k] = [x[:], y[:]]
    plt.rcParams['xtick.direction'] = 'out'
    plt.rcParams['ytick.direction'] = 'out'
    plt.rcParams['font.family'] = 'Times New Roman'
    generation_str = 'Generation'
    hyperv_str = 'Hypervolume index'
    if cn:
        plt.rcParams['font.family'] = 'SimSun'  # SimSun (Chinese serif font)
        generation_str = u'进化代数'
        hyperv_str = u'Hypervolume 指数'
    linestyles = ['-', '--', '-.', ':']
    fig, ax = plt.subplots(figsize=(10, 8))
    for mark_idx, (method, gen_hyperv) in enumerate(hyperv.items()):
        xdata = gen_hyperv[0]
        ydata = gen_hyperv[1]
        plt.plot(xdata,
                 ydata,
                 # BUGFIX: cycle linestyles; plain indexing raised IndexError
                 # when more than len(linestyles) methods were plotted
                 linestyle=linestyles[mark_idx % len(linestyles)],
                 color='black',
                 label=method,
                 linewidth=2)
    plt.legend(fontsize=24, loc=2)
    xaxis = plt.gca().xaxis
    yaxis = plt.gca().yaxis
    for xticklabel in xaxis.get_ticklabels():
        xticklabel.set_fontsize(20)
    for yticklabel in yaxis.get_ticklabels():
        yticklabel.set_fontsize(20)
    plt.xlabel(generation_str, fontsize=20)
    plt.ylabel(hyperv_str, fontsize=20)
    ax.set_xlim(left=0, right=ax.get_xlim()[1] + 2)
    plt.tight_layout()
    save_png_eps(plt, ws, 'hypervolume')
    # close current plot in case of 'figure.max_open_warning'
    plt.cla()
    plt.clf()
    plt.close()