예제 #1
0
 def iuh(cfg, n_subbasins, dt=24):
     """Invoke the external IUH (Instantaneous Unit Hydrograph) program.

     Args:
         cfg: Configuration object providing ``seims_bin``, ``hostname``,
             ``port``, and ``spatial_db`` attributes.
         n_subbasins: Number of subbasins to process.
         dt: Time interval passed to the program; defaults to 24, which was
             previously hard-coded (i.e. daily time step).
     """
     # Command: <bin>/iuh <host> <port> <spatial_db> <gridfs> <dt> <n>
     str_cmd = '"%s/iuh" %s %d %s %s %s %d' % (
         cfg.seims_bin, cfg.hostname, cfg.port, cfg.spatial_db,
         DBTableNames.gridfs_spatial, dt, n_subbasins)
     UtilClass.run_command(str_cmd)
예제 #2
0
 def grid_layering(cfg, n_subbasins):
     """Run the external grid_layering program against the spatial database."""
     # Recreate a clean output directory for the layering information.
     out_dir = cfg.dirs.layerinfo
     UtilClass.rmmkdir(out_dir)
     cmd_args = (cfg.seims_bin, cfg.hostname, cfg.port, out_dir,
                 cfg.spatial_db, DBTableNames.gridfs_spatial, n_subbasins)
     UtilClass.run_command('"%s/grid_layering" %s %d %s %s %s %d' % cmd_args)
 def grid_layering(cfg, n_subbasins):
     """Invoke grid layering program."""
     # Recreate a clean output directory for the layering information.
     layering_dir = cfg.dirs.layerinfo
     UtilClass.rmmkdir(layering_dir)
     # Command: <bin>/grid_layering <host> <port> <outdir> <db> <gridfs> <n>
     str_cmd = '"%s/grid_layering" %s %d %s %s %s %d' % (
         cfg.seims_bin, cfg.hostname, cfg.port, layering_dir,
         cfg.spatial_db, DBTableNames.gridfs_spatial, n_subbasins)
     UtilClass.run_command(str_cmd)
예제 #4
0
 def iuh(cfg, n_subbasins):
     """Run the external IUH program with a daily (24 h) time interval."""
     dt = 24
     # Command: <bin>/iuh <host> <port> <spatial_db> <gridfs> <dt> <n>
     cmd_args = (cfg.seims_bin, cfg.hostname, cfg.port, cfg.spatial_db,
                 DBTableNames.gridfs_spatial, dt, n_subbasins)
     str_cmd = '"%s/iuh" %s %d %s %s %s %d' % cmd_args
     UtilClass.run_command(str_cmd)
예제 #5
0
 def spatial_rasters(cfg, subbasin_num):
     """Import spatial raster data."""
     # Whole-basin mode (subbasin_num == 0) uses the basin mask raster;
     # otherwise the subbasin raster is used as the mask.
     subbasin_file = cfg.spatials.mask if subbasin_num == 0 else cfg.spatials.subbsn
     str_cmd = '"%s/import_raster" %s %s %s %s %s %d' % (
         cfg.seims_bin, subbasin_file, cfg.dirs.geodata2db, cfg.spatial_db,
         DBTableNames.gridfs_spatial, cfg.hostname, cfg.port)
     UtilClass.run_command(str_cmd)
예제 #6
0
File: vector.py  Project: giserh/PyGeoC
 def convert2geojson(jsonfile, src_srs, dst_srs, src_file):
     """Convert an ESRI shapefile to a GeoJSON file via GDAL's ogr2ogr.

     Args:
         jsonfile: Destination GeoJSON path; removed first if it already exists.
         src_srs: Source spatial reference (e.g. EPSG code or proj string).
         dst_srs: Target spatial reference.
         src_file: Source shapefile path.
     """
     if os.path.exists(jsonfile):
         os.remove(jsonfile)
     if sysstr == 'Windows':
         exepath = '"%s/Lib/site-packages/osgeo/ogr2ogr"' % sys.exec_prefix
     else:
         exepath = FileClass.get_executable_fullpath('ogr2ogr')
     # Quote the -t_srs value as well: SRS definitions such as proj strings
     # may contain spaces, which previously broke command parsing.
     s = '%s -f GeoJSON -s_srs "%s" -t_srs "%s" %s %s' % (
         exepath, src_srs, dst_srs, jsonfile, src_file)
     UtilClass.run_command(s)
예제 #7
0
File: vector.py  Project: crazyzlj/PyGeoC
 def convert2geojson(jsonfile, src_srs, dst_srs, src_file):
     """Convert a shapefile to a GeoJSON file via GDAL's ogr2ogr.

     Args:
         jsonfile: Destination GeoJSON path; removed first if it already exists.
         src_srs: Source spatial reference.
         dst_srs: Target spatial reference.
         src_file: Source shapefile path.
     """
     if os.path.exists(jsonfile):
         os.remove(jsonfile)
     # On Windows, ogr2ogr is looked up inside the interpreter's site-packages;
     # elsewhere it is resolved from the PATH.
     if sysstr == 'Windows':
         exepath = '"%s/Lib/site-packages/osgeo/ogr2ogr"' % sys.exec_prefix
     else:
         exepath = FileClass.get_executable_fullpath('ogr2ogr')
     # ogr2ogr argument order: destination first, then source.
     s = '%s -f GeoJSON -s_srs "%s" -t_srs %s %s %s' % (
         exepath, src_srs, dst_srs, jsonfile, src_file)
     UtilClass.run_command(s)
예제 #8
0
 def mask_raster_cpp(bin_dir, maskfile, originalfiles, outputfiles, default_values, configfile):
     """Call mask_raster program (cpp version) to mask raster"""
     # Write the configuration file consumed by the C++ mask_raster tool:
     # mask path, raster count, then one <input, default, output> line each.
     count = len(originalfiles)
     with open(configfile, 'w', encoding='utf-8') as conf_f:
         conf_f.write('%s\n' % maskfile)
         conf_f.write('%d\n' % (count,))
         for idx in range(count):
             conf_f.write('%s\t%d\t%s\n' % (originalfiles[idx],
                                            default_values[idx],
                                            outputfiles[idx]))
     # Invoke the external executable with the config file just written.
     UtilClass.run_command('"%s/mask_raster" %s' % (bin_dir, configfile))
예제 #9
0
 def mask_raster_cpp(bin_dir, maskfile, originalfiles, outputfiles, default_values, configfile):
     """Call mask_raster program (cpp version) to mask raster.

     Writes a configuration file listing the mask raster, the raster count,
     and one ``<input>\\t<default>\\t<output>`` line per raster, then invokes
     the external ``mask_raster`` executable.
     """
     n = len(originalfiles)
     # write mask config file; explicit encoding for cross-platform
     # consistency with the sibling implementation of this function
     with open(configfile, 'w', encoding='utf-8') as f:
         f.write(maskfile + '\n')
         f.write('%d\n' % (n,))
         for i in range(n):
             s = '%s\t%d\t%s\n' % (originalfiles[i], default_values[i], outputfiles[i])
             f.write(s)
     # run command
     UtilClass.run_command('"%s/mask_raster" %s' % (bin_dir, configfile))
예제 #10
0
def run_field_partition(bin_dir,
                        maskf,
                        streamf,
                        flowf,
                        luf,
                        demf,
                        thresh,
                        arcgis_code=True):
    """Invoke the external fieldpartition program.

    Appends the ``-arcgis`` flag when ``arcgis_code`` is True.
    """
    base_cmd = '"%s/fieldpartition" -mask %s -stream %s -flow %s' \
               ' -lu %s -dem %s -t %d' % (bin_dir, maskf, streamf, flowf, luf, demf, thresh)
    cmd_str = base_cmd + ' -arcgis' if arcgis_code else base_cmd
    UtilClass.run_command(cmd_str)
예제 #11
0
 def run(self):
     """Run SEIMS model and return True on success, False otherwise.

     Ensures the output directory exists, optionally resets the simulation
     and output time periods, executes the model command, saves run logs to
     ``runlogs.txt``, and parses the timespan from the logs.
     """
     stime = time.time()
     if not os.path.isdir(self.OutputDirectory) or not os.path.exists(
             self.OutputDirectory):
         os.makedirs(self.OutputDirectory)
     # If the input time period is not consistent with the predefined time period in FILE_IN.
     if self.simu_stime and self.simu_etime and self.simu_stime != self.start_time \
         and self.simu_etime != self.end_time:
         self.ResetSimulationPeriod()
     # If the output time period is specified, reset the time period of all output IDs
     if self.out_stime and self.out_etime:
         self.ResetOutputsPeriod(self.OutputIDs, self.out_stime,
                                 self.out_etime)
     try:
         self.runlogs = UtilClass.run_command(self.Command)
         with open(self.OutputDirectory + os.sep + 'runlogs.txt',
                   'w',
                   encoding='utf-8') as f:
             f.write('\n'.join(self.runlogs))
         self.ParseTimespan(self.runlogs)
         self.run_success = True
     # BUGFIX: `except A or B or C` evaluates to `except A` only, so IOError
     # and other exceptions were escaping uncaught; a tuple catches them all.
     except (CalledProcessError, IOError, Exception) as err:
         # 1. SEIMS-based model running failed
         # 2. The OUTPUT directory was not been created successfully by SEIMS-based model
         # 3. Other unpredictable errors
         print('Run SEIMS model failed! %s' % str(err))
         self.run_success = False
     self.runtime = time.time() - stime
     return self.run_success
예제 #12
0
 def reclassify_landuse_parameters(bin_dir, config_file, dst_dir, landuse_file, lookup_dir,
                                   landuse_attr_list, default_landuse_id):
     """
     Reclassify landuse parameters by lookup table.

     Writes the reclassify configuration file (landuse raster + default id,
     lookup dir, output dir, attribute count, attribute names), then invokes
     the external ``reclassify`` executable.
     TODO(LJ): this function should be replaced by replaceByDict() function!
     """
     # prepare reclassify configuration file; explicit encoding for
     # consistency with the sibling implementation of this function
     with open(config_file, 'w', encoding='utf-8') as f_reclass_lu:
         f_reclass_lu.write("%s\t%d\n" % (landuse_file, default_landuse_id))
         f_reclass_lu.write("%s\n" % lookup_dir)
         f_reclass_lu.write(dst_dir + "\n")
         n = len(landuse_attr_list)
         f_reclass_lu.write("%d\n" % n)
         f_reclass_lu.write("\n".join(landuse_attr_list))
     s = '"%s/reclassify" %s' % (bin_dir, config_file)
     UtilClass.run_command(s)
예제 #13
0
 def reclassify_landuse_parameters(bin_dir, config_file, dst_dir, landuse_file, lookup_dir,
                                   landuse_attr_list, default_landuse_id):
     """
     Reclassify landuse parameters by lookup table.

     Writes the reclassify configuration file (landuse raster + default id,
     lookup dir, output dir, attribute count, attribute names), then invokes
     the external ``reclassify`` executable.
     TODO(LJ): this function should be replaced by replaceByDict() function!
     """
     # prepare reclassify configuration file
     with open(config_file, 'w', encoding='utf-8') as f_reclass_lu:
         f_reclass_lu.write('%s\t%d\n' % (landuse_file, default_landuse_id))
         f_reclass_lu.write('%s\n' % lookup_dir)
         f_reclass_lu.write(dst_dir + "\n")
         n = len(landuse_attr_list)
         f_reclass_lu.write('%d\n' % n)
         f_reclass_lu.write('\n'.join(landuse_attr_list))
     s = '"%s/reclassify" %s' % (bin_dir, config_file)
     UtilClass.run_command(s)
예제 #14
0
    def spatial_rasters(cfg, subbasin_num):
        """Import spatial raster data."""
        # NOTE: start_id is only consumed by the commented-out per-subbasin
        # code below; it is otherwise unused.
        if subbasin_num == 0:  # the whole basin!
            start_id = 0
            subbasin_file = cfg.spatials.mask
        else:
            start_id = 1
            subbasin_file = cfg.spatials.subbsn
        # Command: <bin>/import_raster <mask> <datadir> <db> <gridfs> <host> <port>
        str_cmd = '"%s/import_raster" %s %s %s %s %s %d' % (
            cfg.seims_bin, subbasin_file, cfg.dirs.geodata2db, cfg.spatial_db,
            DBTableNames.gridfs_spatial, cfg.hostname, cfg.port)

        # I recommend not output to directory. lj
        # UtilClass.mkdir(cfg.dirs.import2db)
        # for i in range(start_id, subbasin_num + 1):
        #     subdir = cfg.dirs.import2db + os.path.sep + str(i)
        #     UtilClass.rmmkdir(subdir)
        # str_cmd = '%s %s' % (str_cmd, cfg.dirs.import2db)
        UtilClass.run_command(str_cmd)
예제 #15
0
 def raster2shp(rasterfile, vectorshp, layername=None, fieldname=None):
     """Convert a raster into an ESRI shapefile by polygonizing it."""
     FileClass.remove_files(vectorshp)
     FileClass.check_file_exists(rasterfile)
     # Delegate raster-to-polygon conversion to GDAL's helper script.
     tool = FileClass.get_executable_fullpath("gdal_polygonize.py")
     cmd = 'python %s -f "ESRI Shapefile" %s %s' % (tool, rasterfile, vectorshp)
     # Optional layer and field names are appended only when both are given.
     if layername is not None and fieldname is not None:
         cmd += ' %s %s' % (layername, fieldname)
     print(cmd)
     print(UtilClass.run_command(cmd))
예제 #16
0
    def field_partition(cfg):
        """Fields partition incorporating spatial topology.

        Refers to: Wu, Hui, A.-Xing Zhu, Jun-Zhi Liu, Yong-Bo Liu, and Jing-Chao Jiang. 2017.
                     "Best Management Practices Optimization at Watershed Scale: Incorporating
                      Spatial Topology among Fields." Water Resources Management,
                      doi: 10.1007/s11269-017-1801-8.
        """
        if not cfg.fields_partition:  # field partition is disabled in config
            return
        # Fixed inputs shared by every threshold run.
        fixed_args = (cfg.seims_bin, cfg.spatials.mask, cfg.spatials.stream_link,
                      cfg.spatials.d8flow, cfg.spatials.landuse, cfg.spatials.filldem)
        # Run the external fieldpartition program once per threshold value.
        for thresh in cfg.fields_partition_thresh:
            UtilClass.run_command('"%s/fieldpartition" -mask %s -stream %s '
                                  '-flow %s -lu %s -dem %s -t %d'
                                  % (fixed_args + (thresh,)))
예제 #17
0
    def spatial_rasters(cfg, subbasin_num):
        """Import spatial raster data."""
        # Whole-basin mode (subbasin_num == 0) masks by the basin mask raster;
        # otherwise the subbasin raster is the mask. start_id is only used by
        # the disabled per-subbasin code kept below for reference.
        whole_basin = subbasin_num == 0
        start_id = 0 if whole_basin else 1
        subbasin_file = cfg.spatials.mask if whole_basin else cfg.spatials.subbsn
        str_cmd = '"%s/import_raster" %s %s %s %s %s %d' % (
            cfg.seims_bin, subbasin_file, cfg.dirs.geodata2db, cfg.spatial_db,
            DBTableNames.gridfs_spatial, cfg.hostname, cfg.port)

        # I recommend not output to directory. lj
        # UtilClass.mkdir(cfg.dirs.import2db)
        # for i in range(start_id, subbasin_num + 1):
        #     subdir = cfg.dirs.import2db + os.path.sep + str(i)
        #     UtilClass.rmmkdir(subdir)
        # str_cmd = '%s %s' % (str_cmd, cfg.dirs.import2db)
        UtilClass.run_command(str_cmd)
예제 #18
0
File: run_seims.py  Project: crazyzlj/SEIMS
 def run(self):
     """Run SEIMS model and return True on success, False otherwise."""
     stime = time.time()
     if not os.path.isdir(self.OutputDirectory) or not os.path.exists(self.OutputDirectory):
         os.makedirs(self.OutputDirectory)
     try:
         run_logs = UtilClass.run_command(self.Command)
         self.ParseTimespan(run_logs)
         self.run_success = True
     # BUGFIX: `except CalledProcessError or Exception` evaluates to
     # `except CalledProcessError` only; Exception subsumes it anyway.
     except Exception:
         print('Run SEIMS model failed!')
         self.run_success = False
     self.runtime = time.time() - stime
     return self.run_success
예제 #19
0
    def field_partition(cfg):
        """Fields partition incorporating spatial topology.

        Refers to: Wu, Hui, A-Xing Zhu, Junzhi Liu, Yongbo Liu, and Jingchao Jiang. 2018.
                     "Best Management Practices Optimization at Watershed Scale: Incorporating
                      Spatial Topology among Fields." Water Resources Management, 32(1):155-177,
                      doi: 10.1007/s11269-017-1801-8.
        """
        if not cfg.fields_partition:  # field partition is disabled in config
            return
        # Gather the spatial input rasters for the partitioning tool.
        maskf = cfg.spatials.mask
        streamf = cfg.spatials.stream_link
        flowf = cfg.spatials.d8flow
        luf = cfg.spatials.landuse
        demf = cfg.spatials.filldem
        threshs = cfg.fields_partition_thresh
        # Run the external fieldpartition program once per threshold value.
        for thresh in threshs:
            # run command
            # Note that the flowf is currently converted to ArcGIS flow direction code
            #   by `post_process_of_delineated_data` function.
            UtilClass.run_command(
                '"%s/fieldpartition" -mask %s -stream %s -flow %s -lu %s -dem %s '
                '-t %d -arcgis' %
                (cfg.seims_bin, maskf, streamf, flowf, luf, demf, thresh))
예제 #20
0
 def run(self):
     """Run SEIMS model and return True on success, False otherwise."""
     stime = time.time()
     if not os.path.isdir(self.OutputDirectory) or not os.path.exists(
             self.OutputDirectory):
         os.makedirs(self.OutputDirectory)
     try:
         run_logs = UtilClass.run_command(self.Command)
         self.ParseTimespan(run_logs)
         self.run_success = True
     # BUGFIX: `except CalledProcessError or Exception` evaluates to
     # `except CalledProcessError` only; Exception subsumes it anyway.
     except Exception:
         print('Run SEIMS model failed!')
         self.run_success = False
     self.runtime = time.time() - stime
     return self.run_success
예제 #21
0
 def run(self):
     """Run SEIMS model and return True on success, False otherwise.

     Ensures the output directory exists, optionally resets the simulation
     period, executes the model command, saves run logs to ``runlogs.txt``,
     and parses the timespan from the logs.
     """
     stime = time.time()
     if not os.path.isdir(self.OutputDirectory) or not os.path.exists(
             self.OutputDirectory):
         os.makedirs(self.OutputDirectory)
     # If the input time period is not consistent with the predefined time period in FILE_IN.
     if self.simu_stime and self.simu_etime and self.simu_stime != self.start_time \
         and self.simu_etime != self.end_time:
         self.ResetSimulationPeriod()
     try:
         self.runlogs = UtilClass.run_command(self.Command)
         with open(self.OutputDirectory + os.sep + 'runlogs.txt',
                   'w',
                   encoding='utf-8') as f:
             f.write('\n'.join(self.runlogs))
         self.ParseTimespan(self.runlogs)
         self.run_success = True
     # BUGFIX: `except CalledProcessError or Exception` evaluates to
     # `except CalledProcessError` only; Exception subsumes it anyway.
     except Exception as err:
         print('Run SEIMS model failed! %s' % str(err))
         self.run_success = False
     self.runtime = time.time() - stime
     return self.run_success
예제 #22
0
    def run(function_name,
            in_files,
            wp=None,
            in_params=None,
            out_files=None,
            mpi_params=None,
            log_params=None):
        """
        Run TauDEM function.

           1. The command will not execute if any input file does not exist.
           2. An error will be detected after running the TauDEM command if
           any output file does not exist;

        Args:
            function_name (str): Full path of TauDEM function.
            in_files (dict, required): Dict of pairs of parameter id (string) and file path
                (string or list) for input files, e.g.::

                    {'-z': '/full/path/to/dem.tif'}

            wp (str, optional): Workspace for outputs. If not specified, the directory of the
                first input file in ``in_files`` will be used.
            in_params (dict, optional): Dict of pairs of parameter id (string) and value
                (or None for a flag parameter without a value) for input parameters, e.g.::

                    {'-nc': None}
                    {'-thresh': threshold}
                    {'-m': 'ave' 's', '-nc': None}

            out_files (dict, optional): Dict of pairs of parameter id (string) and file
                path (string or list) for output files, e.g.::

                    {'-fel': 'filleddem.tif'}
                    {'-maxS': ['harden.tif', 'maxsimi.tif']}

            mpi_params (dict, optional): Dict of pairs of parameter id (string) and value or
                path for MPI setting, e.g.::

                    {'mpipath':'/soft/bin','hostfile':'/soft/bin/cluster.node','n':4}
                    {'mpipath':'/soft/bin', 'n':4}
                    {'n':4}

            log_params (dict, optional): Dict of pairs of parameter id (string) and value or
                path for runtime and log output parameters. e.g.::

                    {'logfile': '/home/user/log.txt',
                     'runtimefile': '/home/user/runtime.txt'}

        Returns:
            True if TauDEM run successfully, otherwise False.
        """
        # Check input files
        if in_files is None:
            TauDEM.error('Input files parameter is required!')
        if not isinstance(in_files, dict):
            TauDEM.error('The input files parameter must be a dict!')
        # NOTE(review): TauDEM.error presumably raises/aborts; confirm, since
        # execution otherwise continues after reporting an error here.
        # Normalize input paths to absolute, deducing the workspace `wp` from
        # the first existing input when it was not supplied.
        for (pid, infile) in list(in_files.items()):
            if infile is None:
                continue
            if isinstance(infile, list) or isinstance(infile, tuple):
                for idx, inf in enumerate(infile):
                    if inf is None:
                        continue
                    inf, wp = TauDEM.check_infile_and_wp(inf, wp)
                    in_files[pid][idx] = inf
                continue
            if os.path.exists(infile):
                infile, wp = TauDEM.check_infile_and_wp(infile, wp)
                in_files[pid] = os.path.abspath(infile)
            else:
                # For more flexible input files extension.
                # e.g., -inputtags 1 <path/to/tag1.tif> 2 <path/to/tag2.tif> ...
                # in such unpredictable circumstance, we cannot check the existance of
                # input files, so the developer will check it in other place.
                if len(StringClass.split_string(infile, ' ')) > 1:
                    continue
                else:  # the infile still should be a existing file, so check in workspace
                    if wp is None:
                        TauDEM.error('Workspace should not be None!')
                    infile = wp + os.sep + infile
                    if not os.path.exists(infile):
                        TauDEM.error(
                            'Input files parameter %s: %s is not existed!' %
                            (pid, infile))
                    in_files[pid] = os.path.abspath(infile)
        # Make workspace dir if not existed
        UtilClass.mkdir(wp)
        # Check the log parameter
        log_file = None
        runtime_file = None
        if log_params is not None:
            if not isinstance(log_params, dict):
                TauDEM.error('The log parameter must be a dict!')
            if 'logfile' in log_params and log_params['logfile'] is not None:
                log_file = log_params['logfile']
                # If log_file is just a file name, then save it in the default workspace.
                if os.sep not in log_file:
                    log_file = wp + os.sep + log_file
                    log_file = os.path.abspath(log_file)
            if 'runtimefile' in log_params and log_params[
                    'runtimefile'] is not None:
                runtime_file = log_params['runtimefile']
                # If runtime_file is just a file name, then save it in the default workspace.
                if os.sep not in runtime_file:
                    runtime_file = wp + os.sep + runtime_file
                    runtime_file = os.path.abspath(runtime_file)

        # remove out_files to avoid any file IO related error
        new_out_files = list()
        if out_files is not None:
            if not isinstance(out_files, dict):
                TauDEM.error('The output files parameter must be a dict!')
            for (pid, out_file) in list(out_files.items()):
                if out_file is None:
                    continue
                if isinstance(out_file, list) or isinstance(out_file, tuple):
                    for idx, outf in enumerate(out_file):
                        if outf is None:
                            continue
                        outf = FileClass.get_file_fullpath(outf, wp)
                        FileClass.remove_files(outf)
                        out_files[pid][idx] = outf
                        new_out_files.append(outf)
                else:
                    out_file = FileClass.get_file_fullpath(out_file, wp)
                    FileClass.remove_files(out_file)
                    out_files[pid] = out_file
                    new_out_files.append(out_file)

        # concatenate command line
        commands = list()
        # MPI header
        if mpi_params is not None:
            if not isinstance(mpi_params, dict):
                TauDEM.error('The MPI settings parameter must be a dict!')
            if 'mpipath' in mpi_params and mpi_params['mpipath'] is not None:
                commands.append(mpi_params['mpipath'] + os.sep + 'mpiexec')
            else:
                commands.append('mpiexec')
            if 'hostfile' in mpi_params and mpi_params['hostfile'] is not None \
                    and not StringClass.string_match(mpi_params['hostfile'], 'none') \
                    and os.path.isfile(mpi_params['hostfile']):
                commands.append('-f')
                commands.append(mpi_params['hostfile'])
            if 'n' in mpi_params and mpi_params['n'] > 1:
                commands.append('-n')
                commands.append(str(mpi_params['n']))
            else:  # If number of processor is less equal than 1, then do not call mpiexec.
                commands = []
        # append TauDEM function name, which can be full path or just one name
        commands.append(function_name)
        # append input files
        for (pid, infile) in list(in_files.items()):
            if infile is None:
                continue
            # Parameter ids are normalized to start with a dash.
            if pid[0] != '-':
                pid = '-' + pid
            commands.append(pid)
            if isinstance(infile, list) or isinstance(infile, tuple):
                commands.append(' '.join(tmpf for tmpf in infile))
            else:
                commands.append(infile)
        # append input parameters
        if in_params is not None:
            if not isinstance(in_params, dict):
                TauDEM.error('The input parameters must be a dict!')
            for (pid, v) in list(in_params.items()):
                if pid[0] != '-':
                    pid = '-' + pid
                commands.append(pid)
                # allow for parameter which is an flag without value
                if v != '' and v is not None:
                    if MathClass.isnumerical(v):
                        commands.append(str(v))
                    else:
                        commands.append(v)
        # append output parameters
        if out_files is not None:
            for (pid, outfile) in list(out_files.items()):
                if outfile is None:
                    continue
                if pid[0] != '-':
                    pid = '-' + pid
                commands.append(pid)
                if isinstance(outfile, list) or isinstance(outfile, tuple):
                    commands.append(' '.join(tmpf for tmpf in outfile))
                else:
                    commands.append(outfile)
        # run command
        runmsg = UtilClass.run_command(commands)
        TauDEM.log(runmsg, log_file)
        TauDEM.output_runtime_to_log(function_name, runmsg, runtime_file)
        # Check out_files, raise RuntimeError if not exist.
        for of in new_out_files:
            if not os.path.exists(of):
                TauDEM.error('%s failed, and the %s was not generated!' %
                             (function_name, of))
                return False
        return True
예제 #23
0
            os.mkdir(tmp_ws)
        tmp_comb_dir = comb_dir + os.sep + fdir
        if not os.path.isdir(tmp_comb_dir):
            os.mkdir(tmp_comb_dir)
        tmp_ts_dir = tmp_ws + os.sep + fname
        if not os.path.isdir(tmp_ts_dir):
            os.mkdir(tmp_ts_dir)

        dstUrl = download_m3u8_resolution(tmp_ts_dir, furl, fname)
        if dstUrl is None:
            print("%s failed, can not download pre_output.m3u8\n" % fname)
            continue
        print(dstUrl)

        dstFile = download_actual_m3u8(tmp_ts_dir, dstUrl, fname)
        if dstFile is None:
            print("%s failed, can not download actual output.m3u8\n" % fname)
            continue
        print(dstFile)

        shutil.copy(key_file, tmp_ts_dir + os.sep + 'mykey.key')
        download_ts_files(tmp_ts_dir, dstFile, dstUrl)

        UtilClass.run_command([
            ffmpeg_bin, '-allowed_extensions', 'ALL', '-i',
            '%s/%s.m3u8' % (tmp_ts_dir, fname), '-bsf:a', 'aac_adtstoasc',
            '-vcodec', 'copy', '-c', 'copy', '-crf', '50',
            '%s/%s.mp4' % (tmp_comb_dir, fname)
        ])
        time.sleep(10)
예제 #24
0
File: TauDEM.py  Project: crazyzlj/PyGeoC
    def run(function_name, in_files, wp=None, in_params=None, out_files=None, mpi_params=None,
            log_params=None):
        """
        Run TauDEM function.

         - 1. The command will not execute if any input file does not exist.
         - 2. An error will be detected after running the TauDEM command if
              any output file does not exist;

        Args:
            function_name (str): Full path of TauDEM function.
            in_files (dict, required): Dict of pairs of parameter id (string) and file path
                (string or list) for input files, e.g.::

                    {'-z': '/full/path/to/dem.tif'}

            wp (str, optional): Workspace for outputs. If not specified, the directory of the
                first input file in ``in_files`` will be used.
            in_params (dict, optional): Dict of pairs of parameter id (string) and value
                (or None for a flag parameter without a value) for input parameters, e.g.::

                    {'-nc': None}
                    {'-thresh': threshold}
                    {'-m': 'ave' 's', '-nc': None}

            out_files (dict, optional): Dict of pairs of parameter id (string) and file
                path (string or list) for output files, e.g.::

                    {'-fel': 'filleddem.tif'}
                    {'-maxS': ['harden.tif', 'maxsimi.tif']}

            mpi_params (dict, optional): Dict of pairs of parameter id (string) and value or
                path for MPI setting, e.g.::

                    {'mpipath':'/soft/bin','hostfile':'/soft/bin/cluster.node','n':4}
                    {'mpipath':'/soft/bin', 'n':4}
                    {'n':4}

            log_params (dict, optional): Dict of pairs of parameter id (string) and value or
                path for runtime and log output parameters. e.g.::

                    {'logfile': '/home/user/log.txt',
                     'runtimefile': '/home/user/runtime.txt'}

        Returns:
            True if TauDEM run successfully, otherwise False.
        """
        # Check input files
        if in_files is None:
            TauDEM.error('Input files parameter is required!')
        if not isinstance(in_files, dict):
            TauDEM.error('The input files parameter must be a dict!')
        for (pid, infile) in iteritems(in_files):
            if infile is None:
                continue
            if isinstance(infile, list) or isinstance(infile, tuple):
                for idx, inf in enumerate(infile):
                    if inf is None:
                        continue
                    inf, wp = TauDEM.check_infile_and_wp(inf, wp)
                    in_files[pid][idx] = inf
                continue
            if os.path.exists(infile):
                infile, wp = TauDEM.check_infile_and_wp(infile, wp)
                in_files[pid] = os.path.abspath(infile)
            else:
                # For more flexible input files extension.
                # e.g., -inputtags 1 <path/to/tag1.tif> 2 <path/to/tag2.tif> ...
                # in such unpredictable circumstance, we cannot check the existance of
                # input files, so the developer will check it in other place.
                if len(StringClass.split_string(infile, ' ')) > 1:
                    continue
                else:  # the infile still should be a existing file, so check in workspace
                    if wp is None:
                        TauDEM.error('Workspace should not be None!')
                    infile = wp + os.sep + infile
                    if not os.path.exists(infile):
                        TauDEM.error('Input files parameter %s: %s is not existed!' %
                                     (pid, infile))
                    in_files[pid] = os.path.abspath(infile)
        # Make workspace dir if not existed
        UtilClass.mkdir(wp)
        # Check the log parameter
        log_file = None
        runtime_file = None
        if log_params is not None:
            if not isinstance(log_params, dict):
                TauDEM.error('The log parameter must be a dict!')
            if 'logfile' in log_params and log_params['logfile'] is not None:
                log_file = log_params['logfile']
                # If log_file is just a file name, then save it in the default workspace.
                if os.sep not in log_file:
                    log_file = wp + os.sep + log_file
                    log_file = os.path.abspath(log_file)
            if 'runtimefile' in log_params and log_params['runtimefile'] is not None:
                runtime_file = log_params['runtimefile']
                # If log_file is just a file name, then save it in the default workspace.
                if os.sep not in runtime_file:
                    runtime_file = wp + os.sep + runtime_file
                    runtime_file = os.path.abspath(runtime_file)

        # remove out_files to avoid any file IO related error
        new_out_files = list()
        if out_files is not None:
            if not isinstance(out_files, dict):
                TauDEM.error('The output files parameter must be a dict!')
            for (pid, out_file) in iteritems(out_files):
                if out_file is None:
                    continue
                if isinstance(out_file, list) or isinstance(out_file, tuple):
                    for idx, outf in enumerate(out_file):
                        if outf is None:
                            continue
                        outf = FileClass.get_file_fullpath(outf, wp)
                        FileClass.remove_files(outf)
                        out_files[pid][idx] = outf
                        new_out_files.append(outf)
                else:
                    out_file = FileClass.get_file_fullpath(out_file, wp)
                    FileClass.remove_files(out_file)
                    out_files[pid] = out_file
                    new_out_files.append(out_file)

        # concatenate command line
        commands = list()
        # MPI header
        if mpi_params is not None:
            if not isinstance(mpi_params, dict):
                TauDEM.error('The MPI settings parameter must be a dict!')
            if 'mpipath' in mpi_params and mpi_params['mpipath'] is not None:
                commands.append(mpi_params['mpipath'] + os.sep + 'mpiexec')
            else:
                commands.append('mpiexec')
            if 'hostfile' in mpi_params and mpi_params['hostfile'] is not None \
                    and not StringClass.string_match(mpi_params['hostfile'], 'none') \
                    and os.path.isfile(mpi_params['hostfile']):
                commands.append('-f')
                commands.append(mpi_params['hostfile'])
            if 'n' in mpi_params and mpi_params['n'] > 1:
                commands.append('-n')
                commands.append(str(mpi_params['n']))
            else:  # If number of processor is less equal than 1, then do not call mpiexec.
                commands = []
        # append TauDEM function name, which can be full path or just one name
        commands.append(function_name)
        # append input files
        for (pid, infile) in iteritems(in_files):
            if infile is None:
                continue
            if pid[0] != '-':
                pid = '-' + pid
            commands.append(pid)
            if isinstance(infile, list) or isinstance(infile, tuple):
                commands.append(' '.join(tmpf for tmpf in infile))
            else:
                commands.append(infile)
        # append input parameters
        if in_params is not None:
            if not isinstance(in_params, dict):
                TauDEM.error('The input parameters must be a dict!')
            for (pid, v) in iteritems(in_params):
                if pid[0] != '-':
                    pid = '-' + pid
                commands.append(pid)
                # allow for parameter which is an flag without value
                if v != '' and v is not None:
                    if MathClass.isnumerical(v):
                        commands.append(str(v))
                    else:
                        commands.append(v)
        # append output parameters
        if out_files is not None:
            for (pid, outfile) in iteritems(out_files):
                if outfile is None:
                    continue
                if pid[0] != '-':
                    pid = '-' + pid
                commands.append(pid)
                if isinstance(outfile, list) or isinstance(outfile, tuple):
                    commands.append(' '.join(tmpf for tmpf in outfile))
                else:
                    commands.append(outfile)
        # run command
        runmsg = UtilClass.run_command(commands)
        TauDEM.log(runmsg, log_file)
        TauDEM.output_runtime_to_log(function_name, runmsg, runtime_file)
        # Check out_files, raise RuntimeError if not exist.
        for of in new_out_files:
            if not os.path.exists(of):
                TauDEM.error('%s failed, and the %s was not generated!' % (function_name, of))
                return False
        return True
# Example #25
    def metis_partition(g, weight, wp, bin_dir):
        """Partition subbasins into multiple groups by METIS.

        Runs the ``gpmetis`` program twice for each candidate partition
        number ``n``: once with the default direct k-way method (kmetis)
        and once with recursive bisection (``-ptype=rb``, pmetis), and
        collects the resulting group IDs for every subbasin.

        Args:
            g: `NetworkX.DiGraph` object of the subbasin/reach network
            weight: weight of each node, e.g., area of subbasin, {subbasinID: weight}
            wp: output directory
            bin_dir: directory of METIS package (containing ``gpmetis``)

        Returns:
            group_dict: {subbasinID: {'group': group_number_list,
                                      'kmetis': group_ids_list_by_kmetis,
                                      'pmetis': group_ids_list_by_pmetis}
                        }
        """
        group_dict = dict()
        for subbsn_id in g.nodes():
            group_dict[subbsn_id] = {'group': list(), 'kmetis': list(), 'pmetis': list()}

        metis_input = ImportReaches2Mongo.prepare_node_with_weight_for_metis(g, weight, wp)
        # Candidate numbers of groups: 1..128 plus a few larger values.
        nlist = list(range(1, 129))
        nlist.extend([192, 256, 384, 512, 768, 1536])
        # nlist should be less than the number of subbasin, otherwise it will make nonsense.
        # NOTE(review): uses max(node IDs) as the count — equivalent only when
        # subbasin IDs are contiguous 1..N, which the i+1 indexing below assumes.
        ns = g.nodes()
        nlist = [x for x in nlist if x <= max(ns)]
        # Make directories for KMETIS and PMETIS
        UtilClass.mkdir(wp + os.path.sep + 'kmetis')
        UtilClass.mkdir(wp + os.path.sep + 'pmetis')

        def _run_gpmetis(ptype_arg, subdir, n):
            """Run gpmetis once, log its console output, read and adjust the
            partition result, and archive the METIS output file.

            ptype_arg: '' for default k-way, '-ptype=rb ' for recursive bisection.
            subdir: 'kmetis' or 'pmetis', used for both log and archive names.
            Returns the adjusted list of group IDs (one per subbasin, file order).
            """
            str_command = '"%s/gpmetis" %s%s %d' % (bin_dir, ptype_arg, metis_input, n)
            result = UtilClass.run_command(str_command)
            with open('%s/%s/%sResult%d.txt' % (wp, subdir, subdir, n), 'w') as f_metis_output:
                for line in result:
                    f_metis_output.write(line)
            # gpmetis writes its partition next to the input file.
            metis_output = '%s.part.%d' % (metis_input, n)
            with open(metis_output, 'r') as f:
                groups = [int(item) for item in f.readlines()]
            adjust_group_result(weight, groups, n)
            shutil.move(metis_output, '%s/%s/metis.part.%d' % (wp, subdir, n))
            return groups

        for n in nlist:
            print('divide number: %d' % n)
            if n <= 1:
                # Trivial partition: everything in one group; METIS is not invoked.
                for subbsn_id in g.nodes():
                    group_dict[subbsn_id]['group'].append(1)
                    group_dict[subbsn_id]['kmetis'].append(0)
                    group_dict[subbsn_id]['pmetis'].append(0)
                continue
            # kmetis, -ptype=kway, direct k-way partitioning (default)
            group_kmetis = _run_gpmetis('', 'kmetis', n)
            # pmetis, -ptype=rb, recursive bisectioning
            group_pmetis = _run_gpmetis('-ptype=rb ', 'pmetis', n)

            # Partition files list subbasins in ID order starting from 1.
            for i, (gk, gp) in enumerate(zip(group_kmetis, group_pmetis)):
                group_dict[i + 1]['group'].append(n)
                group_dict[i + 1]['kmetis'].append(gk)
                group_dict[i + 1]['pmetis'].append(gp)
        return group_dict
    def metis_partition(g, weight, wp, bin_dir):
        """Partition subbasins into multiple groups by METIS.

        For every candidate group count, invoke ``gpmetis`` with the default
        direct k-way method (kmetis) and with recursive bisection (pmetis),
        then record the resulting group ID of each subbasin.

        Args:
            g: `NetworkX.DiGraph` object
            weight: weight of each node, e.g., area of subbasin, {subbasinID: weight}
            wp: output directory
            bin_dir: directory of METIS package
        Returns:
            group_dict: {subbasinID: {'group': group_number_list,
                                      'kmetis': group_ids_list_by_kmetis,
                                      'pmetis': group_ids_list_by_pmetis}
                        }
        """
        # One record per subbasin node, filled incrementally below.
        group_dict = {nd: {'group': list(), 'kmetis': list(), 'pmetis': list()}
                      for nd in g.nodes()}

        metis_input = ImportReaches2Mongo.prepare_node_with_weight_for_metis(g, weight, wp)
        # Candidate partition counts: 1..128 plus several larger values.
        divide_nums = list(range(1, 129)) + [192, 256, 384, 512, 768, 1536]
        # Drop counts exceeding the largest subbasin ID, which would be nonsense.
        node_ids = g.nodes()
        upper = max(node_ids)
        divide_nums = [dn for dn in divide_nums if dn <= upper]
        # Output directories for the two partitioning variants.
        UtilClass.mkdir(wp + os.path.sep + 'kmetis')
        UtilClass.mkdir(wp + os.path.sep + 'pmetis')
        for dn in divide_nums:
            print('divide number: %d' % dn)
            if dn <= 1:
                # Single-group case: no need to call METIS at all.
                for nd in g.nodes():
                    group_dict[nd]['group'].append(1)
                    group_dict[nd]['kmetis'].append(0)
                    group_dict[nd]['pmetis'].append(0)
                continue
            # kmetis: direct k-way partitioning (gpmetis default, -ptype=kway)
            cmd = '"%s/gpmetis" %s %d' % (bin_dir, metis_input, dn)
            console_out = UtilClass.run_command(cmd)
            with open('%s/kmetis/kmetisResult%d.txt' % (wp, dn), 'w') as log_f:
                log_f.writelines(console_out)
            part_file = '%s.part.%d' % (metis_input, dn)
            with open(part_file, 'r') as pf:
                kmetis_groups = [int(ln) for ln in pf.readlines()]
            adjust_group_result(weight, kmetis_groups, dn)
            shutil.move(part_file, '%s/kmetis/metis.part.%d' % (wp, dn))

            # pmetis: recursive bisectioning (-ptype=rb)
            cmd = '"%s/gpmetis" -ptype=rb %s %d' % (bin_dir, metis_input, dn)
            console_out = UtilClass.run_command(cmd)
            with open('%s/pmetis/pmetisResult%d.txt' % (wp, dn), 'w') as log_f:
                log_f.writelines(console_out)
            with open(part_file, 'r') as pf:
                pmetis_groups = [int(ln) for ln in pf.readlines()]
            adjust_group_result(weight, pmetis_groups, dn)
            shutil.move(part_file, '%s/pmetis/metis.part.%d' % (wp, dn))

            # Partition files are ordered by subbasin ID starting at 1.
            for idx, (kg, pg) in enumerate(zip(kmetis_groups, pmetis_groups)):
                group_dict[idx + 1]['group'].append(dn)
                group_dict[idx + 1]['kmetis'].append(kg)
                group_dict[idx + 1]['pmetis'].append(pg)
        return group_dict