def get_result_file_name(module, cas_file):
    """
    Return the name of the result file declared in a steering file.

    @param module name of the telemac module
    @param cas_file name of the telemac steering file

    @return name of the result file for the given case
    """
    # Dictionary of the module, found under $HOMETEL/sources/<module>
    dico = path.join(environ['HOMETEL'], 'sources', module, module + '.dico')
    steering = TelemacCas(cas_file, dico)
    return steering.get(KEY_RES[module])
コード例 #2
0
ファイル: template.py プロジェクト: biqilong/TELEMAC_MCPBE
def get_fortran_file(steering_file, module):
    """
    Get the fortran file from a cas (looks into coupled steering files as well)

    @param steering_file Name of the steering file
    @param module Name of the module

    @return name of the fortran file, or None when no fortran file is defined
    """
    dico = path.join(environ['HOMETEL'], 'sources', module, module + '.dico')
    cas = TelemacCas(steering_file, dico)
    fortran_file = cas.get('FORTRAN FILE', '')
    if fortran_file != '':
        return fortran_file

    # Only searching in coupled files for telemac2d and telemac3d
    if module not in ("telemac2d", "telemac3d"):
        return None

    cpl_with = cas.get('COUPLING WITH', '')
    if cpl_with == '':
        return None

    for cpl_mod in cpl_with.lower().split(';'):
        cpl_dico = path.join(environ['HOMETEL'], 'sources', cpl_mod,
                             cpl_mod + '.dico')
        # Some coupled module do not have a dictionary (nestor, waqtel)
        if not path.exists(cpl_dico):
            continue
        cpl_steering = cas.get(cpl_mod.upper() + ' STEERING FILE')
        cpl_cas = TelemacCas(cpl_steering, cpl_dico)
        cpl_fortran = cpl_cas.get('FORTRAN FILE', '')
        del cpl_cas
        if cpl_fortran != '':
            return cpl_fortran
    return None
コード例 #3
0
ファイル: vnv_confluence.py プロジェクト: meracan/dockers
    def _pre(self):
        """
        Defining the studies
        """
        # confluence scalar mode
        self.add_study('vnv_seq', 'telemac2d', 't2d_confluence.cas')

        # confluence parallel mode: same steering file, run on 4 cores
        par_cas = TelemacCas('t2d_confluence.cas', get_dico('telemac2d'))
        par_cas.set('PARALLEL PROCESSORS', 4)
        self.add_study('vnv_par', 'telemac2d', 't2d_confluence_par.cas',
                       cas=par_cas)
        del par_cas
コード例 #4
0
def copy_cas(cas_file, new_cas_file, module, modifs):
    """
    Build a new cas file from an existing one, overriding some keywords.
    The new file keeps the modification time of the original one.

    @param cas_file (string) Name of the orginal cas
    @param new_cas_file (string) Name of the new cas
    @param module (string) Name of the telemac-mascaret module
    @param modifs (dict) dict where the key is the keyword
                  to modify and value the one to apply

    @return four empty lists (report placeholders)
    """
    from execution.telemac_cas import TelemacCas
    from datetime import datetime
    import time
    dico = path.join(environ['HOMETEL'], 'sources', module,
                     module + '.dico')
    steering = TelemacCas(cas_file, dico)

    # Apply each requested keyword modification
    for keyword, new_value in modifs.items():
        steering.set(keyword, new_value)

    # Writing new cas file
    steering.write(new_cas_file)

    # Transfer the modification time of cas_file onto the new file
    # This is to avoid validate_telemac.py to rerun the validation every time
    stamp = datetime.fromtimestamp(int(path.getmtime(cas_file)))
    mod_time = time.mktime(stamp.timetuple())
    utime(new_cas_file, (mod_time, mod_time))

    print('    ~> Creating "{}"'.format(path.basename(new_cas_file)))

    return [], [], [], []
コード例 #5
0
def main():
    """
    Main function of partel.py

    Parses the command line, loads the telemac configuration, locates the
    dictionary of the requested module and rewrites the steering file in
    both french and english keyword versions.
    """
    # <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
    # ~~ Reads config file ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    print('\n\nLoading Options and Configurations\n' + 72 * '~' + '\n')
    parser = argparse.ArgumentParser(description='Translate a keyword')
    parser = add_config_argument(parser)
    parser.add_argument("module",
                        choices=[
                            'postel3d', 'telemac2d', 'telemac3d', 'tomawac',
                            'artemis', 'sisyphe', 'waqtel', 'khione', 'stbtel'
                        ],
                        help="Name of the module for which to translate")
    # Fixed garbled help text (was two fused sentences:
    # "... to translatefile to be partitionned")
    parser.add_argument(
        "cas_file",
        help="Name of the steering file to translate")
    args = parser.parse_args()

    # <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
    # ~~~~ Environment ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    update_config(args)
    cfg = CFGS.configs[CFGS.cfgname]
    CFGS.compute_execution_info()

    # <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
    # Searching for the dictionary associated with the steering case
    dico_file = path.join(cfg['MODULES'][args.module]['path'],
                          args.module + '.dico')
    if not path.exists(dico_file):
        raise TelemacException(\
            'Could not find the dictionary file: {}'.format(dico_file))
    # check_files=False: input files need not exist just to translate keywords
    cas = TelemacCas(args.cas_file, dico_file, check_files=False)

    cas.write_fr_gb()

    print('\n\nMy work is done\n\n')
    sys.exit(0)
コード例 #6
0
def copy_file_to_tmp(test_dir, tmp_dir, module, root_dir, skip_test):
    #@TODO: Merge with vvytel/copy_file_to_valid_dir
    """
    Copy all the files needed by the test case into the temporary folder.

    @param test_dir path to the test case to validate
    @param tmp_dir path to the test case temporary folder
    @param module Name of the module
    @param root_dir Root directory of the installation
    @param skip_test Test cases to skip

    @return list of (steering file name, user fortran or None) tuples
    """
    # <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
    # Start from a clean temporary folder
    if path.exists(tmp_dir):
        shutil.rmtree(tmp_dir)
    mkdir(tmp_dir)
    chdir(tmp_dir)
    # Getting list on input/output files from the dictionary
    dico_file = path.join(root_dir, 'sources', module, module + '.dico')
    list_file = []
    # Looping on the steering files of the test case
    for cas_file in glob(test_dir + sep + '*.cas'):
        cas_name = path.basename(cas_file)
        if cas_name in skip_test:
            continue
        shutil.copyfile(cas_file, cas_name)
        cas = TelemacCas(cas_file, dico_file)
        user_fortran = None
        # Looping on input files
        for key in cas.in_files:
            ffile = cas.values[key]
            src = test_dir + sep + ffile
            if 'FORTRAN' in key:
                # Remove a stale fortran *directory* left by a previous run
                target = path.join(tmp_dir, ffile)
                if path.exists(target) and not path.isfile(target):
                    shutil.rmtree(target)
                user_fortran = ffile
                # A user fortran can be a single file or a whole folder
                if path.isfile(src):
                    shutil.copyfile(src, ffile)
                else:
                    shutil.copytree(src, ffile)
            else:
                shutil.copyfile(src, ffile)
        list_file.append((cas_name, user_fortran))
    return list_file
コード例 #7
0
ファイル: study.py プロジェクト: ogourgue/telemac-on-mac
    def __init__(self, steering_file, code_name, working_dir):
        """
        Init function: parses the steering file (and the steering files of
        any coupled modules) and prepares the build/run paths of the study.

        @param steering_file (string) Name of the steering file to run
        @param code_name (string) Name of the module used
        @param working_dir (string) If not empty will be the name of the
                                    working directory

        @raises TelemacException when the steering file does not exist
        @raises StudyException when the module dictionary cannot be found
        """
        if not path.exists(steering_file):
            raise TelemacException(\
              "Could not find your steering file :\n{}".format(steering_file))
        self.steering_file = steering_file
        self.case_dir = path.dirname(path.realpath(self.steering_file))
        self.working_dir = ''
        self.code_name = code_name
        self.sortie_file = ''
        self.exe_name = ''
        self.run_cmd = ''
        self.mpi_cmd = ''
        self.par_cmd = ''

        # Getting configuration information
        self.cfgname = CFGS.cfgname
        self.cfg = CFGS.configs[CFGS.cfgname]

        # Searching for the dictionary associated with the steering case
        self.dico_file = path.join(self.cfg['MODULES'][self.code_name]['path'],
                                   self.code_name + '.dico')
        if not path.exists(self.dico_file):
            raise StudyException(self,\
                'Could not find the dictionary file: {}'.format(self.dico_file))

        # ~~> processing steering file
        self.cas = TelemacCas(self.steering_file, self.dico_file)

        # parsing informations for coupled modules steering files
        cplages = self.cas.get('COUPLING WITH', '').split(',')

        self.ncnode = 1
        self.nctile = 1
        self.ncsize = self.cas.get('PARALLEL PROCESSORS', default=1)
        self.lang = self.cas.lang

        # Steering cases of the coupled modules, keyed by module name
        self.cpl_cases = {}

        #/!\ having done the loop this way it will not check for DELWAQ
        cpl_codes = []
        for cplage in cplages:
            for mod in self.cfg['MODULES']:
                if mod in cplage.lower():
                    cpl_codes.append(mod)

        for code in cpl_codes:
            # ~~~~ Extract the CAS File name ~~~~~~~~~~~~~~~~~~~~~~~
            cas_name_cpl = self.cas.get(\
                      code.upper()+' STEERING FILE')
            cas_name_cpl = path.join(self.case_dir, cas_name_cpl)

            if not path.isfile(cas_name_cpl):
                raise StudyException(self,\
                     'Missing coupling steering file for '+code+': '+\
                           cas_name_cpl)

            # ~~ Read the coupled CAS File ~~~~~~~~~~~~~~~~~~~~~~~~~
            dico_file_plage = path.join(self.cfg['MODULES'][code]['path'],
                                        code + '.dico')
            cas_plage = TelemacCas(cas_name_cpl, dico_file_plage)

            self.cpl_cases[code] = cas_plage

        # ~~> structural assumptions: bin/obj/lib live under builds/<cfg>/
        self.bin_path = path.join(self.cfg['root'], 'builds', self.cfgname,
                                  'bin')
        self.obj_path = self.cfg['MODULES'][self.code_name]['path'].replace(\
               path.join(self.cfg['root'], 'sources'),
               path.join(self.cfg['root'], 'builds', self.cfgname, 'obj'))
        self.lib_path = path.join(self.cfg['root'], 'builds', self.cfgname,
                                  'lib')

        self.set_working_dir(working_dir)
        self.set_exe()
コード例 #8
0
ファイル: study.py プロジェクト: ogourgue/telemac-on-mac
class Study(object):
    """
    A telemac-mascaret study: wraps a steering file and the steering files
    of its coupled modules. It can then be split, compiled, run and merged.
    """
    def __init__(self, steering_file, code_name, working_dir):
        """
        Init function: parses the steering file (and the steering files of
        any coupled modules) and prepares the build/run paths of the study.

        @param steering_file (string) Name of the steering file to run
        @param code_name (string) Name of the module used
        @param working_dir (string) If not empty will be the name of the
                                    working directory

        @raises TelemacException when the steering file does not exist
        @raises StudyException when the module dictionary cannot be found
        """
        if not path.exists(steering_file):
            raise TelemacException(\
              "Could not find your steering file :\n{}".format(steering_file))
        self.steering_file = steering_file
        self.case_dir = path.dirname(path.realpath(self.steering_file))
        self.working_dir = ''
        self.code_name = code_name
        self.sortie_file = ''
        self.exe_name = ''
        self.run_cmd = ''
        self.mpi_cmd = ''
        self.par_cmd = ''

        # Getting configuration information
        self.cfgname = CFGS.cfgname
        self.cfg = CFGS.configs[CFGS.cfgname]

        # Searching for the dictionary associated with the steering case
        self.dico_file = path.join(self.cfg['MODULES'][self.code_name]['path'],
                                   self.code_name + '.dico')
        if not path.exists(self.dico_file):
            raise StudyException(self,\
                'Could not find the dictionary file: {}'.format(self.dico_file))

        # ~~> processing steering file
        self.cas = TelemacCas(self.steering_file, self.dico_file)

        # parsing informations for coupled modules steering files
        cplages = self.cas.get('COUPLING WITH', '').split(',')

        self.ncnode = 1
        self.nctile = 1
        self.ncsize = self.cas.get('PARALLEL PROCESSORS', default=1)
        self.lang = self.cas.lang

        # Steering cases of the coupled modules, keyed by module name
        self.cpl_cases = {}

        #/!\ having done the loop this way it will not check for DELWAQ
        cpl_codes = []
        for cplage in cplages:
            for mod in self.cfg['MODULES']:
                if mod in cplage.lower():
                    cpl_codes.append(mod)

        for code in cpl_codes:
            # ~~~~ Extract the CAS File name ~~~~~~~~~~~~~~~~~~~~~~~
            cas_name_cpl = self.cas.get(\
                      code.upper()+' STEERING FILE')
            cas_name_cpl = path.join(self.case_dir, cas_name_cpl)

            if not path.isfile(cas_name_cpl):
                raise StudyException(self,\
                     'Missing coupling steering file for '+code+': '+\
                           cas_name_cpl)

            # ~~ Read the coupled CAS File ~~~~~~~~~~~~~~~~~~~~~~~~~
            dico_file_plage = path.join(self.cfg['MODULES'][code]['path'],
                                        code + '.dico')
            cas_plage = TelemacCas(cas_name_cpl, dico_file_plage)

            self.cpl_cases[code] = cas_plage

        # ~~> structural assumptions: bin/obj/lib live under builds/<cfg>/
        self.bin_path = path.join(self.cfg['root'], 'builds', self.cfgname,
                                  'bin')
        self.obj_path = self.cfg['MODULES'][self.code_name]['path'].replace(\
               path.join(self.cfg['root'], 'sources'),
               path.join(self.cfg['root'], 'builds', self.cfgname, 'obj'))
        self.lib_path = path.join(self.cfg['root'], 'builds', self.cfgname,
                                  'lib')

        self.set_working_dir(working_dir)
        self.set_exe()

    def set_working_dir(self, working_dir_name=''):
        """
        Set the working directory for the study by default:
        steering_cas_YYYY-MM-HHh-MMm-SSs/

        @param working_dir_name (string) If not empty will be the name of the
                                         working directory
        """
        # ~~> default temporary directory name
        # /!\ includes date/time in the name
        tmp_dir = self.case_dir+sep+\
                  path.basename(self.steering_file) + '_' + \
                  strftime("%Y-%m-%d-%Hh%Mmin%Ss", localtime())
        wdir = tmp_dir
        self.working_dir = wdir
        # sortie_file keeps the default (dated) name even when the working
        # directory is overridden below
        self.sortie_file = wdir
        # ~~> user defined directory name
        if working_dir_name != '':
            wdir = path.join(self.case_dir, working_dir_name)
            self.working_dir = wdir

    def create_working_dir(self):
        """
        Creates the working_dir for the study (no-op when it already exists)
        """
        # ~~> dealing with the temporary directory
        if not path.exists(self.working_dir):
            mkdir(self.working_dir)

    def set_ncsize(self, ncsize, ncnode, nctile):
        """
        Overwrite the number of parallel processors in the steering file

        @param ncsize (int) The total number of cores
        @param nctile (int): number of cores per node given by user
        @param ncnode (int): number of nodes given by user

        @raises StudyException when ncsize > 1 but MPI is not configured
        """
        self.nctile, self.ncnode, ncsize = \
                check_para_tilling(nctile, ncnode,
                                   ncsize, 1, self.ncsize)
        if self.cfg['MPI'] != {}:
            ncsize = max(1, ncsize)
        elif ncsize > 1:
            raise StudyException(self,\
              '\nParallel inconsistency: ' \
              '\n     +> you may be using an inappropriate configuration: '\
              +self.cfgname+ \
              '\n     +> or may be wishing for scalar mode while setting to '\
              +str(ncsize)+' processors')
        if self.cfg['MPI'] == {}:
            ncsize = 0
        # ~~ Forces keyword if parallel ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
        # /!\ in case of multiple CAS files, you have to have the same ncsize
        self.cas.set('PARALLEL PROCESSORS', ncsize)
        # Adding in coupled cas file as well
        for code in self.cpl_cases:
            self.cpl_cases[code].set('PARALLEL PROCESSORS', ncsize)
        self.ncsize = ncsize

    def fill_working_dir(self, use_link=False):
        """
        Filling temporary folder copying files

        @param use_link (boolean) If True making link instead of copy
        """
        # >>> Placing yourself where the CAS File is
        chdir(self.case_dir)
        # >>> Copy INPUT files into wdir
        process_lit(\
            self.cas,
            self.case_dir,
            self.ncsize,
            self.working_dir,
            use_link)
        # Adding section name to CAS file information as the coupled
        # module might have sections and zones as well

        for cas_cpl in self.cpl_cases.values():
            process_lit(\
                cas_cpl,
                self.case_dir,
                self.ncsize,
                self.working_dir,
                use_link)
        # >>> Placing yourself into the wdir
        chdir(self.working_dir)
        # >>> Creating LNG file
        process_config(self.lang)

    def build_mpi_cmd(self, hosts):
        """
        Set the mpi command string

        @param hosts (string) Name of the host on which to run the mpi command
        """
        # ~~> MPI host file provided through the command line
        if hosts != '':
            if 'HOSTS' in self.cfg['MPI']:
                self.cfg['MPI']['HOSTS'] = hosts.replace(':', ' ')
            else:
                self.cfg['MPI'].update({'HOSTS': hosts.replace(':', ' ')})
        # ~~> MPI Command line and options ( except <exename> )
        # /!\ cfg['MPI'] is also modified
        mpicmd = get_mpi_cmd(self.cfg['MPI'])\
                  .replace('<root>', self.cfg['root'])
        # mpi_exec supports: -n <ncsize> -wdir <wdir> <exename>
        mpicmd = mpicmd.replace('<ncsize>', str(self.ncsize))
        # >>> Parallel execution configuration
        mpi = mpicmd
        # ~~> filling in the blanks
        mpi = mpi.replace('<wdir>', self.working_dir)
        self.mpi_cmd = mpi

    def set_exe(self):
        """
        Set the name of the executable of the study
        """
        ext = self.cfg['SYSTEM']['sfx_exe']
        user_fortran = self.cas.get("FORTRAN FILE")
        # If we have a user_fortran, the executable is rebuilt from it;
        # otherwise the default module executable is used
        if user_fortran != '':
            use_file = 'out_user_fortran' + ext
            exe_name = path.join(self.working_dir, use_file)
        else:
            exe_file = path.join(self.bin_path, self.code_name + ext)
            use_file = 'out_' + path.basename(exe_file)
            exe_name = path.join(self.working_dir, use_file)

        self.exe_name = exe_name
        self.run_cmd = self.exe_name

    def compile_exe(self):
        """
        Compile the executable of the study in the working directory
        """
        # >>> Placing yourself in the temporary folder
        chdir(self.working_dir)

        self.exe_name = process_executable(\
            self.working_dir,
            self.bin_path, self.lib_path,
            self.obj_path, self.cfg['SYSTEM'],
            self.cfg['TRACE'], self.code_name)

    def generate_mpi_files(self):
        """
        Generate the PARAL and HOSTFILE files need by telemac-mascaret
        """

        if self.cfg['MPI'] != {}:
            # ~~> MPI host file ( may be re-written by the HPC INFILE script )
            hostfile = self.cfg['MPI']['HOSTFILE']
            hosts = []
            n = 0
            # Cycle through the configured hosts until ncsize slots are filled
            while n < self.ncsize:
                for i in self.cfg['MPI']['HOSTS'].split():
                    hosts.append(i)
                    n += 1
                    if n == self.ncsize:
                        break
            chdir(self.working_dir)
            # ~~> Creating the HOST file
            put_file_content(hostfile, hosts)
            # ~~> Creating the PARA file
            put_file_content('PARAL', [
                str(self.ncsize),
                str(len(self.working_dir + sep)), self.working_dir + sep, ''
            ])

    def partionning(self, use_link):
        """
        Running partitioning of the input files (PARTEL)

        @param use_link (boolean) If True making link instead of copy
        """
        # No partitionning to do
        if self.ncsize <= 1:
            return
        chdir(self.working_dir)
        # ~~> Path
        bin_path = path.join(self.cfg['root'], 'builds', self.cfgname, 'bin')
        parcmd = get_partel_cmd(bin_path, self.cfg, self.mpi_cmd)
        # >>> Add running command
        self.par_cmd = parcmd

        # ~~> Run PARTEL for the base files
        # Global GEO file
        g_geo, g_fmt_geo, g_conlim = get_glogeo(self.cas)
        # Setting section, zone, weirs file (needed by partel) but only
        # available in telemac2d
        if self.code_name == 'telemac2d':
            # section file
            if 'SECTIONS INPUT FILE' in self.cas.in_files:
                submit = self.cas.in_files['SECTIONS INPUT FILE']
                section_name = submit.split(';')[1]
            else:
                section_name = ''
            # Zone file
            if 'ZONES FILE' in self.cas.in_files:
                submit = self.cas.in_files['ZONES FILE']
                zone_name = submit.split(';')[1]
            else:
                zone_name = ''
            # Weirs are only passed to partel if type of weirs == 2
            if 'WEIRS FILE' in self.cas.values and \
               self.cas.values.get('TYPE OF WEIRS', 0) == 2:
                submit = self.cas.in_files['WEIRS FILE']
                weir_name = submit.split(';')[1]
            else:
                weir_name = ''
        else:
            section_name = ''
            zone_name = ''
            weir_name = ''
        # Identify the partitioner to use for Partel
        i_part = get_partitionner(self.cas.get('PARTITIONING TOOL'))
        #Are we gonna concatenate the output of partel or not ?
        concat = self.cas.get('CONCATENATE PARTEL OUTPUT', '')
        s_concat = 'YES' if concat else 'NO'

        # ~~> Run partitioning/duplication for all input files
        run_partition(parcmd, self.cas, g_geo, g_fmt_geo, g_conlim,
                      self.ncsize, section_name, zone_name, weir_name,
                      use_link, i_part, s_concat)

        # Same actions for coupled steering files
        for cas_cpl in self.cpl_cases.values():
            g_geo, g_fmt_geo, g_conlim = get_glogeo(cas_cpl)
            run_partition(parcmd, cas_cpl, g_geo, g_fmt_geo, g_conlim,
                          self.ncsize, '', '', '', use_link, i_part, s_concat)

    def set_sortie(self, sortie_file, merge):
        """
        Defining name of 'sortie' files

        @param sortie_file (boolean) If False no sortie file is used
        @param merge (boolean) If True re-use the latest existing sortie file
        """
        if not sortie_file:
            self.sortie_file = None
        else:
            if merge:
                # try re-using existing/latest sortie file with same root name
                output_dir = path.join(self.working_dir,
                                       path.basename(self.steering_file))
                sortie_file = get_latest_output_files(output_dir)[0]
                self.sortie_file = path.basename(sortie_file)
            else:
                # define the filename (basename) of the sortie file
                self.sortie_file = path.basename(self.sortie_file) + '.sortie'

    def run_local(self):
        """
        Local run of the study (sequential or parallel)
        """
        chdir(self.working_dir)
        print('\n\n' + self.run_cmd + '\n\n')
        # ~~> here you go run
        run_code(self.run_cmd, self.sortie_file)

    def run_hpc_exe(self, options, job_id=''):
        """
        Run only the execution of telemac-mascaret executable in job scheduler

        @param options (Values) options of the script runcode.py
        @param job_id (string) Id of a job this one depends on, if any

        @return job_id (integer) Id of the job that was launched
        """
        # /!\ This is being done in parallel when multiple cas_files
        #if not hpcpass:
        chdir(self.working_dir)
        # ~~> HPC Command line launching runcode
        hpccmd = get_hpc_cmd(self.cfg['HPC']).replace('<root>',
                                                      self.cfg['root'])
        hpccmd = hpccmd.replace('<wdir>', self.working_dir)
        # ~~> HPC dependency between jobs
        hpcjob = get_hpc_depend(self.cfg['HPC'])
        if hpcjob != '' and job_id != '':
            hpccmd = hpccmd + ' ' + hpcjob.replace('<jobid>', job_id)
        # ~~> HPC queueing script
        stdin_file = self.cfg['HPC']['STDIN'][0]  # only one key for now
        stdin = self.cfg['HPC']['STDIN'][1]
        stdin, sortie = self.fill_hpc_stdin(stdin, options)
        # working from working dir
        stdin = stdin.replace('<wdir>', self.working_dir)
        # ~~> Recreate the <mpi_exec> (option --hpc)
        stdin = stdin.replace('<exename>', self.run_cmd)
        # /!\ serial mode
        stdin = stdin.replace('<mpi_cmdexec>', self.run_cmd)

        # ~~> Write to HPC_STDIN
        chdir(self.working_dir)
        put_file_content(stdin_file, stdin.split('\n'))

        # ~~> here you go run
        run_code(hpccmd, sortie)

        job_id = get_file_content(sortie)[0].strip()
        print('... Your simulation ('+self.steering_file+\
              ') has been launched through the queue.\n')
        print('   +> You need to wait for completion before re-collecting'\
              'files using the option --merge\n')

        return job_id

    def fill_hpc_stdin(self, stdin, options):
        """
        Replacing tags in file with the one given in options

        @param stdin (string) The content of HPC_STDIN
        @param options (Values) Options of the script runcode.py

        @return (stdin, sortie) The update content of HPC_STDIN and sortie
        """
        if self.cfg['MPI'] != {}:
            stdin = stdin.replace('<hosts>', self.cfg['MPI']['HOSTS'])
        stdin = stdin.replace('<root>', self.cfg['root'])
        stdin = stdin.replace('<configName>', self.cfgname)
        stdin = stdin.replace('<ncsize>', str(self.ncsize))
        stdin = stdin.replace('<nctile>', str(self.nctile))
        stdin = stdin.replace('<ncnode>', str(self.ncnode))
        stdin = stdin.replace('<email>', options.email)
        stdin = stdin.replace('<jobname>', options.jobname)
        time = strftime("%Y-%m-%d-%Hh%Mmin%Ss", localtime())
        stdin = stdin.replace('<time>', time)
        stdin = stdin.replace('<queue>', options.hpc_queue)
        stdin = stdin.replace('<walltime>', options.walltime)
        stdin = stdin.replace('<codename>', self.code_name)
        stdin = stdin.replace('\n ', '\n')
        stdin = stdin.replace('<wdir>', self.case_dir)

        sortie = 'hpc-job.sortie'
        if options.sortie_file:
            sortie = self.sortie_file
        stdin = stdin.replace('<sortiefile>', sortie)

        return stdin, sortie

    def run_hpc_full(self, options, job_id=''):
        """
        Rerun whole script in job scheduler

        @param options (Values) options of the script runcode.py
        @param job_id (string) Id of a job this one depends on, if any

        @return job_id (integer) Id of the job that was launched
        """
        chdir(self.working_dir)
        # ~~> HPC Command line launching runcode
        hpccmd = get_hpc_cmd(self.cfg['HPC']).replace('<root>',
                                                      self.cfg['root'])
        hpccmd = hpccmd.replace('<wdir>', self.working_dir)

        # ~~> HPC dependency between jobs
        hpcjob = get_hpc_depend(self.cfg['HPC'])
        if hpcjob != '' and job_id != '':
            hpccmd = hpccmd + ' ' + hpcjob.replace('<jobid>', job_id)

        # ~~> HPC queueing script
        stdin_file = self.cfg['HPC']['STDIN'][0]  # only one key for now

        stdin = self.cfg['HPC']['STDIN'][1]
        stdin = stdin.replace('<exename>', self.steering_file)
        # Replacing tags by options values
        stdin, sortie = self.fill_hpc_stdin(stdin, options)

        # Building runcode.py command
        runcmd = 'runcode.py ' + self.code_name + ' --mpi '
        if options.config_name != '':
            runcmd = runcmd + ' -c ' + options.cfgname
        if options.config_file != '':
            runcmd = runcmd + ' -f ' + options.config_file
        if options.root_dir != '':
            runcmd = runcmd + ' -r ' + options.root_dir
        runcmd = runcmd + ' -s '
        if options.tmpdirectory:
            runcmd = runcmd + ' -t '
        runcmd = runcmd + ' -w ' + self.working_dir
        runcmd = runcmd + ' --nctile ' + str(self.nctile)
        runcmd = runcmd + ' --ncnode ' + str(self.ncnode)
        runcmd = runcmd + ' --ncsize ' + str(self.ncsize)
        if options.split:
            runcmd = runcmd + ' --split '
        if options.compileonly:
            runcmd = runcmd + ' -x '
        if options.merge:
            runcmd = runcmd + ' --merge '
        if options.run:
            runcmd = runcmd + ' --run '
        runcmd = runcmd + ' ' + self.steering_file
        stdin = stdin.replace('<py_runcode>', runcmd)

        # ~~> Write to HPC_STDIN
        chdir(self.working_dir)
        put_file_content(stdin_file, stdin.split('\n'))

        # ~~> here you go run
        run_code(hpccmd, sortie)

        job_id = get_file_content(sortie)[0].strip()
        print('... Your simulation ('+self.steering_file+\
              ') has been launched through the queue.\n')
        print('    +> You need to wait for completion '\
              'before checking on results.\n')

        return job_id

    def run(self, options):
        """
        Running the study

        @param options (Values) options of runcode.py
        """
        # <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
        # ~~ Running the Executable ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
        # You need to do this if ...
        #     - options.split is out already
        #     - options.compileonly is out already
        #     - if options.run, obvisouly this is the main run of the executable
        # Inputs ...
        #     - runcmd if options.hpc
        #     - cas_files[name]['run'] and cas_files[name]['sortie'] otherwise
        # update mpi command if necessary
        if self.cfg['MPI'] != {} or options.mpi:
            self.run_cmd = self.mpi_cmd.replace('<exename>', self.exe_name)
        if self.cfg['HPC'] == {} or options.mpi:
            self.run_local()

        # <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
        # ~~ Handling the HPC before running ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
        # You need to do this if ...
        #     - if options.run, obvisouly this is the main executable to run
        # Inputs ...
        #     - ncsize, nctilem ncnode, wdir, casdir, options, code_name
        #     - cfg['HPC']['STDIN'] and cfg['MPI']['HOSTS']
        #     - cas_files.values()[0]['sortie'] and cas_files.values()[0]['exe']
        #     - cas_files[name]['run']
        # Outputs ...
        #     > runcmd and put_file_content(stdinfile,)
        elif 'STDIN' not in self.cfg['HPC']:
            raise StudyException(self,\
                   '\nI would need the key hpc_stdin in you '\
                   'configuration so I can launch your simulation '\
                   'on the HPC queue.')
        elif 'EXCODE' in self.cfg['HPC']:
            self.run_hpc_exe(options)
        elif 'PYCODE' in self.cfg['HPC']:
            self.run_hpc_full(options)

    def merge(self):
        """
        Run gretel on file that need it (recombine partitioned results)
        """
        # No merging to do
        if self.ncsize <= 1:
            return
        # ~~> Path
        bin_path = path.join(self.cfg['root'], 'builds', self.cfgname, 'bin')
        execmd = get_gretel_cmd(bin_path, self.cfg)\
                  .replace('<root>', self.cfg['root'])
        # ~~> Run GRETEL
        chdir(self.working_dir)
        # Global GEO file
        cas = self.cas
        g_geo, g_fmt_geo, g_bnd = get_glogeo(cas)
        run_recollection(\
                execmd, cas, g_geo, g_fmt_geo, g_bnd,
                self.ncsize)

        # Running it for coupled steering files
        for cas_cpl in self.cpl_cases.values():
            g_geo, g_fmt_geo, g_bnd = get_glogeo(cas_cpl)
            run_recollection(\
                    execmd, cas_cpl, g_geo, g_fmt_geo, g_bnd,
                    self.ncsize)

    def gather(self, sortie_file, nozip):
        """
        Gather back output files

        @param sortie_file (boolean) If True copying log from working_dir
        @param nozip (boolean) If False log files are zipped together
        """
        sortiefiles = []
        chdir(self.working_dir)
        # ~~> copying all primary result files
        cas = self.cas
        files = process_ecr(cas, self.case_dir, self.sortie_file, self.ncsize)
        if sortie_file:
            sortiefiles.extend(files)
        # ~~> copying all coupled result files
        for cas_cpl in self.cpl_cases.values():
            files = process_ecr(cas_cpl, self.case_dir, None, self.ncsize)
            if sortie_file:
                sortiefiles.extend(files)
        # ~~> zipping sortie files if necessary
        if not nozip and self.ncsize > 1 and sortie_file:
            zipsortie(sortiefiles[0])

        # ~~> post-processing the ARTEMIS animation file if necessary
        if self.code_name == 'artemis':
            value = self.cas.get('FREE SURFACE FILE')
            if value.strip() != '':
                files = process_artnim(cas, self.case_dir)

    def delete_working_dir(self):
        """
        Delete the working dir
        """
        chdir(self.case_dir)
        remove_directories(self.working_dir)
コード例 #9
0
ファイル: sis2gaia.py プロジェクト: biqilong/TELEMAC_MCPBE
def sis2gaia(sis_cas_file, gaia_cas_file):
    """
    Convert a sisyphe steering file into a gaia steering file.

    Each sisyphe keyword is either renamed (see trans_key), dropped
    because it has no gaia equivalent (see removed_keys) or copied
    verbatim.  An existing gaia_cas_file is overwritten.

    @param sis_cas_file (string) Sisyphe cas file
    @param gaia_cas_file (string) Gaia cas file
    """
    # Sisyphe keyword -> gaia keyword translation table.
    # NOTE(review): some spellings below look like typos ('PATICULAR',
    # 'INTITIAL', 'DEPOSION') but they must match the module dictionary
    # files exactly -- do not "fix" them here.
    trans_key = {
        'NUMBER OF BED MODEL LAYERS':
        'NUMBER OF LAYERS FOR INITIAL STRATIFICATION',
        'SOLVER FOR SUSPENSION': 'SOLVER FOR DIFFUSION OF SUSPENSION',
        'SOLVER OPTION FOR SUSPENSION':
        'SOLVER OPTION FOR DIFFUSION OF SUSPENSION',
        'PRECONDITIONING FOR SUSPENSION':
        'PRECONDITIONING FOR DIFFUSION OF SUSPENSION',
        'SOLVER ACCURACY FOR SUSPENSION':
        'ACCURACY FOR DIFFUSION OF SUSPENSION',
        'SUSPENSION': 'SUSPENSION FOR ALL SANDS',
        'REFERENCE CONCENTRATION FORMULA':
        'SUSPENSION TRANSPORT FORMULA FOR ALL SANDS',
        'TETA SUSPENSION': 'TETA IMPLICITATION FOR SUSPENSION',
        'CRITICAL SHEAR VELOCITY FOR MUD DEPOSION':
        'CLASSES CRITICAL SHEAR STRESS FOR MUD DEPOSITION',
        'BED LOAD': 'BED LOAD FOR ALL SANDS',
        'BED-LOAD TRANSPORT FORMULA':
        'BED-LOAD TRANSPORT FORMULA FOR ALL SANDS',
        'SEDIMENT DIAMETERS': 'CLASSES SEDIMENT DIAMETERS',
        'SETTLING VELOCITIES': 'CLASSES SETTLING VELOCITIES',
        'SEDIMENT DENSITY': 'CLASSES SEDIMENT DENSITY',
        'HIDING FACTOR FOR PATICULAR SIZE CLASS': 'CLASSES HIDING FACTOR',
        'SHIELDS PARAMETERS': 'CLASSES SHIELD PARAMETERS',
        'INITIAL FRACTION FOR PARTICULAR SIZE CLASS':
        'CLASSES INTITIAL FRACTION',
        'PARTHENIADES CONSTANT': 'LAYERS PARTHENIADES CONSTANT',
        'MUD CONCENTRATION PER LAYER': 'LAYERS MUD CONCENTRATION',
        'CRITICAL EROSION SHEAR STRESS OF THE MUD':
        'LAYERS CRITICAL EROSION SHEAR STRESS OF THE MUD',
        'MASS TRANSFER PER LAYER': 'LAYERS MASS TRANSFER',
        'NON COHESIVE BED POROSITY': 'LAYERS NON COHESIVE BED POROSITY'
    }
    # Sisyphe keywords with no gaia counterpart: they are dropped.
    # A set gives O(1) membership tests in the copy loop below.
    removed_keys = {
        'STATIONARY MODE', 'CONSTANT FLOW DISCHARGE',
        'NUMBER OF ITERATIONS FOR TELEMAC', 'CRITERION TO UPDATE THE FLOW',
        'CRITICAL EVOLUTION RATIO', 'NUMBER OF TIME STEP', 'TIME STEP',
        'OPTION FOR THE TREATMENT OF NON ERODABLE BEDS', 'GRAIN FEEDING',
        'MASS CONCENTRATION', 'MIXED SEDIMENTS',
        'MEAN DIAMETER OF THE SEDIMENT', 'HYDRODYNAMIC FILE', 'D90',
        'FORMALATION FOR DEPOSITION AND EROSION',
        'STARTING TIME IF THE HYDROGRAM', 'NUMBER OF TIDES OR FLOODS',
        'TIDE PERIOD', 'PRECONDITIONING', 'SOLVER',
        'MAXIMUM OF ITERATIONS FOR SOLVER', 'SOLVER OPTION', 'SOLVER ACCURACY',
        'PARTITIONNING TOOL', 'WATER DENSITY', 'GRAVITY ACCELERATION',
        'FRICTION COEFFICIENT', 'LAW OF BOTTOM FRICTION', 'DIFFUSION',
        'NUMBER IF SIZE-CLASSES OF BED MATERIAL', 'COHESIVE SEDIMENTS',
        'VERTICAL GRAIN SORTING MODEL', 'C-VSM MAIXMUM SECTIONS',
        'C-VSM FULL PRINTOUT PERIOD', 'C-VSM PRINTOUT SELECTION',
        'C-VSM DYNAMIC ALT MODEL', 'MASS-LUMPING', 'TYPE OF ADVECTION',
        'SUPG OPTION', 'OPTION FOR THE DIFFUSION OF TRACER',
        'NUMBER OF CORRECTIONS OF DISTRIBUTIVE SCHEMES',
        'NUMBER OF SUB-STEPS OF DISTRIBUTOVE SCHEMES',
        'TREATMENT OF FLUXES AT THE BOUNDARIES',
        'INITIAL SUSPENSION CONCENTRATIONS', 'OPTION FOR THE DISPERSION',
        'SCHEME OPTION FOR ADVECTION', 'CONSOLIDATION MODEL',
        'GEL CONCENTRATION', 'MAXIMUM CONCENTRATION',
        'PERMEABILITY COEFFICIENT', 'MUD CONSOLIDATION', 'TETA'
    }

    # Dictionaries of both modules, found under $HOMETEL
    sis_dico = path.join(environ['HOMETEL'], 'sources', 'sisyphe',
                         'sisyphe.dico')
    gaia_dico = path.join(environ['HOMETEL'], 'sources', 'gaia', 'gaia.dico')

    print(sis_cas_file, gaia_cas_file)
    sis_cas = TelemacCas(sis_cas_file, sis_dico)

    # Start from a clean output file
    if path.exists(gaia_cas_file):
        remove(gaia_cas_file)

    gaia_cas = TelemacCas(gaia_cas_file, gaia_dico, access='w')

    for key, val in sis_cas.values.items():
        if key in removed_keys:
            # Keyword is obsolete in gaia: skip it
            continue
        # Translate the keyword name if needed, then copy its value
        gaia_cas.set(trans_key.get(key, key), val)

    gaia_cas.write(gaia_cas_file)
コード例 #10
0
def scan_xcas(fle):
    """
    Parse a Mascaret xcas file and collect every input file it references.

    :param fle: path to the xcas steering file of the computation
    :return: list of the input file names needed by the computation
    """
    files = []
    params = ET.parse(fle).getroot()[0]

    # Geometry file
    geom = params.find('parametresGeometrieReseau').find('geometrie')
    files.append(geom.find('fichier').text)

    # Hydraulic law files
    for law in params.find('parametresLoisHydrauliques').find('lois'):
        files.append(law.find('donnees').find('fichier').text)

    # Initial water line (only when restarting from a file is requested)
    water_line = params.find('parametresConditionsInitiales').find('ligneEau')
    if water_line.find('LigEauInit').text == 'true':
        files.append(water_line.find('fichLigEau').text)

    # "Casier" (storage cells) geometry, when present
    casier = params.find('parametresCasier')
    if casier is not None:
        files.append(casier.find('fichierGeomCasiers').text)

    # Tracer related files, when present
    tracer = params.find('parametresTraceur')
    if tracer is not None:
        conc_init = tracer.find('parametresConcentrationsInitialesTraceur')
        if conc_init is not None:
            files.append(conc_init.find('fichConcInit').text)

        water_qual = tracer.find('parametresNumeriquesQualiteEau')
        if water_qual is not None:
            files.append(water_qual.find('fichParamPhysiqueTracer').text)
            files.append(water_qual.find('fichMeteoTracer').text)

        for law in tracer.find('parametresLoisTraceur').find('loisTracer'):
            files.append(law.find('fichier').text)

    # Courlis coupling: keyword file plus the Courlis geometry it declares
    general = params.find('parametresGeneraux')
    if general.find('optionCourlis') is not None:
        casfile = general.find('fichierMotCleCourlis').text
        files.append(casfile)

        print(casfile)
        dicofile = path.join(CFGS.get_root(), "sources", "mascaret", "data",
                             "dico_Courlis.txt")

        cas = TelemacCas(casfile, dicofile)
        files.append(cas.get('FICHIER DE GEOMETRIE COURLIS'))

    return files
コード例 #11
0
    def __call__(self, config_run=None):
        """
        Run the longitudinal Mascaret-Telemac2d coupled computation.

        The run window [StartDate, EndDate] is split into successive
        executions of length "SingleExecDuration" (a single execution
        covering the whole window when that key is absent).  For each
        execution the 2D steering file is updated, a CurrentRunDef.json
        is written, the coupled run is launched through mpirun, and the
        result/restart files are collected into Results/COUPLING_FROM_*.

        @param config_run (dict) Run configuration; must contain at
            least the "Run" and "2D" sections.  Mandatory: a None value
            raises NameError.
        """
        # ---------------------------------------------------------
        # ~~~~~~~~~~~~~~~~~~~~~~~~~
        # ~~~~~ CONFIGURATION ~~~~~
        # ~~~~~~~~~~~~~~~~~~~~~~~~~

        if config_run is None:
            raise NameError('\nDICO NOT FOUND {}. '
                            'PROGRAM STOP. \n\n'.format('config_run'))
        self.configrun = config_run

        # Reference, start and end dates of the simulation
        dicorun = self.configrun["Run"]
        ref_date = datetime.datetime.strptime(dicorun["RefDate"],
                                              '%d/%m/%Y %H:%M:%S')
        start_date = datetime.datetime.strptime(dicorun["StartDate"],
                                                '%d/%m/%Y %H:%M:%S')
        end_date = datetime.datetime.strptime(dicorun["EndDate"],
                                              '%d/%m/%Y %H:%M:%S')
        if "SingleExecDuration" in dicorun:

            # Duration accepted either in a compact "1d2h3m4s" form ...
            iso8601_duration_re = re.compile(r'^(?P<sign>[-+]?)'
                                             r'(?:(?P<days>\d+(.\d+)?)d)?'
                                             r'(?:(?P<hours>\d+(.\d+)?)h)?'
                                             r'(?:(?P<minutes>\d+(.\d+)?)m)?'
                                             r'(?:(?P<seconds>\d+(.\d+)?)s)?'
                                             r'$')

            # ... or in a postgres-interval style "D days HH:MM:SS"
            postgres_interval_re = re.compile(r'^'
                                              r'(?:(?P<days>-?\d+) (days? ?))?'
                                              r'(?:(?P<sign>[-+])?'
                                              r'(?P<hours>\d+):'
                                              r'(?P<minutes>\d\d):'
                                              r'(?P<seconds>\d\d)'
                                              r')?$')

            # Try the postgres form first, fall back to the compact form
            match = (postgres_interval_re.match(dicorun["SingleExecDuration"])
                     or iso8601_duration_re.match(
                         dicorun["SingleExecDuration"]))
            k_w = match.groupdict()
            days = datetime.timedelta(float(k_w.pop('days', 0) or 0))
            sign = -1 if k_w.pop('sign', '+') == '-' else 1
            k_w = {k: float(v) for k, v in k_w.items() if v is not None}
            length = days + sign * datetime.timedelta(**k_w)
        else:
            # No split requested: one execution covers the whole window
            length = end_date - start_date

        # Times in seconds relative to the reference date
        self.param['startTime'] = int((start_date - ref_date).total_seconds())
        self.param['endTime'] = int((end_date - ref_date).total_seconds())
        self.param['singleRun'] = int(length.total_seconds())
        if "SaveCheckPoints" in dicorun:
            self.param['chkPts'] = dicorun["SaveCheckPoints"].lower() == "yes"
        else:
            self.param['chkPts'] = False

        self.param['reprise'] = dicorun["RestartFromFile"].lower() == "yes"

        # Sequential/parallel setting of the (first) 2D model
        if self.configrun["2D"][self.param['nom_mod_2d'][0]]["Parallel"] \
                .lower() == 'no':
            self.param['Run_type_2D'] = 'sequentiel'
            self.param['nb_proc_2D'] = 1
        else:
            self.param['Run_type_2D'] = 'parallele'
            self.param['nb_proc_2D'] = \
                self.configrun["2D"][self.param['nom_mod_2d'][0]]["NbProc"]

        # Number of successive coupled executions
        nbrun = \
            math.ceil(float(self.param['endTime'] - self.param['startTime']) /
                      float(self.param['singleRun']))
        print("\n+--------------------------------------------------------+"
              "\n|------- LONGITUDINAL MASCARET TELEMAC2D COUPLING -------|"
              "\n+--------------------------------------------------------+"
              "\n|")
        print('|  Coupling Method:', self.couplingdef["Coupling"]["Method"])
        print('|  Max It. nr.:    ', self.couplingdef["Coupling"]["MaxIter"])
        print('|  Coupling TStep: ', self.couplingdef["Coupling"]["TimeStep"])
        print('|')
        print('|  Initial time:   ', dicorun["StartDate"])
        print('|  End time:       ', dicorun["EndDate"])
        if "SingleExecDuration" in dicorun:
            print('|  Split Run every:', dicorun["SingleExecDuration"],
                  "({} exec[s])".format(nbrun))
        else:
            print('|  Run not splitted ({} exec)'.format(nbrun))
        print('|  Restarted run:  ', dicorun["RestartFromFile"])
        if self.param['chkPts']:
            print('|  Checkpointed:    yes')
        print('|')
        print('|  1D models:      ', self.param['nom_mod_1d'])
        print('|  2D models:      ', self.param['nom_mod_2d'])
        print('|')
        print('|  Interfaces:      ')
        for itf in self.couplingdef["Interfaces"]:
            print(
                '|    {:<12.12s} ({:<10.10s}) {:<10.10s} of {:<12.12s}'.format(
                    "{}:{}".format(itf["Id1D"], itf["IdExtr1D"]),
                    itf["Condition1D"], itf["1DPosition"],
                    "{}:{}".format(itf["Id2D"], itf["LiqBdry2D"])))
        print("+--------------------------------------------------------+\n")

        # ~~~~~~~~~~~~~~~~~~~~~~~~~
        # ~~~~~ PRELIMINARIES ~~~~~
        # ~~~~~~~~~~~~~~~~~~~~~~~~~

        start = time.time()
        previous_path = os.getcwd()
        os.chdir(self.case_path)

        # Create the EXEC directory and prepare the model input files
        self.exec_creation()
        os.chdir(os.path.join(self.case_path, 'EXEC'))
        self.cas_file_tmp = os.getcwd()
        self.preparation_model()
        # # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
        # # ~~~ RUN OF THE COUPLED MODEL ~~~~~~~
        # # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
        print("\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n"
              "~~~~~   RUN THE COUPLED MODEL    ~~~~~\n"
              "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n")
        tdeb = self.param['startTime']
        freq = self.param['singleRun']

        # End time rounded up to a whole number of executions
        tfinal = tdeb + math.ceil((self.param['endTime'] - tdeb) / freq) * freq

        # One loop iteration = one coupled execution of length freq
        while tdeb < tfinal:
            tstart = tdeb
            tstop = tdeb + freq

            #
            #    # ~~~ Update of the .cas files ~~~~~
            #    # ~~~~~~
            #    # ~~~ 2D .cas file ~~~~~

            filename = os.path.join(self.cas_file_tmp, 'T2DCAS')
            cas = TelemacCas(filename, self.dico_file, check_files=False)
            cas.set('DURATION', float(freq))

            # Any execution after the first one restarts the 2D model
            if tdeb > self.param['startTime']:
                self.param['reprise'] = True
                cas.set('SUITE DE CALCUL', True)
            cas.write(filename)

            # Description of the current execution handed to run_cpl.py
            current_run = {"Run": {}}
            current_run["Run"]["StartTime"] = tstart
            current_run["Run"]["RunLength"] = freq
            current_run["Run"]["CplTSteps"] = \
                math.ceil(self.param['singleRun'] / self.param['dt_couplage'])
            current_run["Run"]["Restarted"] = self.param['reprise']
            current_run["Run"]["InitialRun"] = bool(tdeb == 0.)
            current_run["1D"] = {}
            i_b = 0
            while i_b < self.param['nb_mod_1d']:
                name = self.param['nom_mod_1d'][i_b]
                current_run["1D"][name] = {}
                folder_lig = \
                    os.path.dirname(self.dico_file_model1d[name]["lig"])
                # Water line file names carry the start time without
                # trailing zeros (e.g. WaterLine_name_3600.lig)
                file = "WaterLine_{}_{}.lig"\
                    .format(name, ('%.6f' % tstart).rstrip('0').rstrip('.'))
                file_lig = os.path.join(folder_lig, file)
                if not os.path.exists(file_lig):
                    raise IOError(
                        "ERROR: MISSING WATERLINE FILE {}".format(file_lig))
                current_run["1D"][name]["WaterLineFile"] = file_lig

                # 1D boundary-condition restart file, copied when present
                file = "bc1D_restart_{}_{}.json"\
                    .format(name, ('%.6f' % tstart).rstrip('0').rstrip('.'))
                file_bc = os.path.join(folder_lig, file)
                if os.path.exists(file_bc):
                    copy2(file_bc, os.path.join(self.case_path, 'EXEC'))

                current_run['config_{}'
                            .format(name)] =\
                    self.list_dico_mod['config_{}'.format(name)]

                i_b += 1
            current_run['coupling_def'] = self.couplingdef
            with open('CurrentRunDef.json', 'w') as f_p:
                json.dump(current_run, f_p, indent=4)
            #
            #    # ~~~ Creation of results directory from simulate time ~~~~~
            try:
                dirpath = 'COUPLING_FROM_{}'.format(tdeb)
                dirpath = os.path.join(self.case_path, 'Results', dirpath)
                os.makedirs(dirpath)
            except OSError:
                # Directory already exists: reuse it
                pass
            # Redirect each 1D model listing into the results directory
            i_b = 0
            while i_b < self.param['nb_mod_1d']:
                pattern = 'listing .*$'
                filedst = os.path.join(
                    self.case_path, "Results", "COUPLING_FROM_{}".format(tdeb),
                    "ResultatsListing_{}.lis".format(
                        self.param['nom_mod_1d'][i_b]))
                repl = 'listing ' + filedst

                key = self.param['nom_mod_1d'][i_b]
                pattern_compiled = re.compile(pattern)
                line = self.dico_file_model1d[key]["listing"]
                self.dico_file_model1d[key]["listing"] \
                    = pattern_compiled.sub(repl, line)

                i_b += 1
            #    # ~~~  Suppression of old listing files of MASCARET~~~~~
            #    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
            globpattern = os.path.join(self.case_path, 'Results',
                                       'COUPLING_FROM_{}'.format(tdeb), 'Res')
            globpattern = globpattern + "*.lis"
            for file in glob.glob(globpattern):
                os.remove(file)

            #    # ~~~ COMPUTE THE COUPLING ~~~~~
            #    # ~~~~~~~~~~~~~~~~~~~~~~~
            listing_file = os.path.basename(self.cas_file)
            listing_file = '{}.stdout'.format(listing_file)
            print("Running the coupled model between init time {} "
                  "and end time {}".format(tstart, tstop))

            # One MPI process per 1D model plus the 2D model processes
            cmd = mpirun_cmd()
            cmd = cmd.replace(
                '<ncsize>',
                str(int(self.param['nb_mod_1d'] + self.param['nb_proc_2D'])))

            launch_py = os.path.join(os.environ['HOMETEL'], 'scripts',
                                     'python3', 'run_cpl.py')
            launch_exe = "{0} launcher --n1d {1} > {2}"\
                .format(launch_py, self.param['nb_mod_1d'], listing_file)

            cmd = cmd.replace('<exename>', launch_exe)

            _, return_code = shell_cmd(cmd)

            if return_code != 0:
                raise Exception("\nERROR IN COUPLING RUN.\n"
                                "THE COMMAND IS : {} \n"
                                " PROGRAM STOP.\n".format(cmd))

            #
            #    # ~~~ Suppression of  MASCARET listing files~~~~~
            #    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
            globpattern = os.path.join(self.case_path, 'Results',
                                       'COUPLING_FROM_{}'.format(tdeb), 'Res')
            globpattern = globpattern + "*.lis"
            for file in glob.glob(globpattern):
                os.remove(file)
            #
            #    # ~~~ Creation of restart files ~~~~~
            #    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
            #
            #    # ~~~ Creation of the 1D restart files ~~~~~
            i_b = 0
            while i_b < self.param['nb_mod_1d']:
                folder_lig = os.path.dirname(self.dico_file_model1d[
                    self.param['nom_mod_1d'][i_b]]["lig"])
                name_file = 'WaterLine_{}_*.lig'.format(
                    self.param['nom_mod_1d'][i_b])
                for file in glob.glob(name_file):
                    file_mv(file, os.path.join(folder_lig, file))
                # Keep the 1D boundary restart on checkpoints / last exec
                if self.param['chkPts'] or tdeb + freq == tfinal:
                    name_file = 'bc1D_restart_{}_{}.json' \
                        .format(self.param['nom_mod_1d'][i_b], tdeb + freq)
                    copy2(name_file, os.path.join(folder_lig, name_file))
                i_b += 1
            if tdeb + freq < tfinal:
                #    # ~~~ Creation of the 2D restart files ~~~~~
                # NOTE(review): the chop parameters appear intended to
                # keep only the last frame of T2DRES in T2DPRE -- confirm
                chop_step = int(
                    freq /
                    (self.param['freq_res'] * self.param['dt_couplage']) + 1)

                if self.param['Run_type_2D'].lower() == 'parallele':
                    prepath_para = os.path.join(self.cas_file_tmp, 'T2DPRE')
                    globpattern = prepath_para + "*"
                    for file in glob.glob(globpattern):
                        os.remove(file)

                slf = ChopSelafin('T2DRES', (chop_step, chop_step, chop_step))
                slf.put_content('T2DPRE')

            #    # ~~~ Creation of the result files ~~~~~
            #    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
            #
            #    # ~~~  .opt files~~~~~
            i_b = 0
            while i_b < self.param['nb_mod_1d']:
                filesrc = "study_par_{}".format(self.param['nom_mod_1d'][i_b])
                filesrc = os.path.join(
                    filesrc, "output", "ResultatsOpthyca_{0}.opt".format(
                        self.param['nom_mod_1d'][i_b]))
                filedst = os.path.join(
                    self.case_path, "Results", "COUPLING_FROM_{}".format(tdeb),
                    "ResultatsOpthyca_{}.opt".format(
                        self.param['nom_mod_1d'][i_b]))
                file_mv(filesrc, filedst)

                filesrc = "study_par_{}".format(self.param['nom_mod_1d'][i_b])
                filesrc = \
                    os.path.join(filesrc, "output",
                                 self.dico_file_model1d[
                                    self.param['nom_mod_1d'][i_b]]['listing'])
                filedst = \
                    os.path.join(self.case_path, "Results",
                                 "COUPLING_FROM_{}".format(tdeb),
                                 self.dico_file_model1d[
                                    self.param['nom_mod_1d'][i_b]]['listing'])
                file_mv(filesrc, filedst, verbose=False)
                i_b += 1

            filedst = os.path.join(self.case_path, "Results",
                                   "COUPLING_FROM_{}".format(tdeb),
                                   listing_file)
            file_mv(listing_file, filedst)

            #    # ~~~  T2D .slf FILES  ~~~~~
            result_file = os.path.basename(self.cas_file)
            result_file = '{}_Resultats.slf'.format(result_file)
            restart_dir = os.path.dirname(self.cas_file)
            restart_file = os.path.join(
                restart_dir, 'WaterLineInit_{}.slf'.format(tdeb + freq))

            filedst = os.path.join(self.case_path, "Results",
                                   "COUPLING_FROM_{}".format(tdeb),
                                   result_file)
            # Keep a 2D restart copy on checkpoints / last execution
            if self.param['chkPts'] or tdeb + freq == tfinal:
                copy2("T2DRES", restart_file)
            file_mv("T2DRES", filedst)

            # Additive Schwarz method also needs 2D boundary restarts
            if self.couplingdef["Coupling"]["Method"].lower() == \
                "additiveschwarz" and \
                    (self.param['chkPts'] or tdeb + freq == tfinal):
                restart_dir = os.path.dirname(self.cas_file)
                i_b = 0
                while i_b < self.param['nb_mod_2d']:
                    restart_file = 'bc2D_restart_{}_{}.json'. \
                        format(self.param['nom_mod_2d'][i_b], tdeb + freq)
                    copy2(restart_file, os.path.join(restart_dir,
                                                     restart_file))
                    i_b += 1

            filedst = os.path.join(self.case_path, "Results",
                                   "COUPLING_FROM_{}".format(tdeb),
                                   "Convergence_criteria.out")
            file_mv("Convergence_criteria.out", filedst)
            filedst = os.path.join(self.case_path, "Results",
                                   "COUPLING_FROM_{}".format(tdeb),
                                   "Convergence_criteria.csv")
            file_mv("Convergence_criteria.csv", filedst)
            # Advance the window to the next execution
            tdeb = tdeb + freq

            if os.path.isdir(
                    os.path.join(self.case_path, 'EXEC', '__pycache__')):
                rmtree(os.path.join(self.case_path, 'EXEC', '__pycache__'))

        # # ~~~~~~~~~~~~~~~~~~~~~~
        # # ~~~ FINALISATION ~~~~~
        # # ~~~~~~~~~~~~~~~~~~~~~~
        #
        # # ~~~ RETURN INITIAL PATH ~~~~~
        os.chdir("..")
        # cd ..
        end = time.time()
        print("My work is done. Coupled job lasted : {} s \n".format(end -
                                                                     start))
        os.chdir(previous_path)
コード例 #12
0
    def preparation_model(self):
        """
        Prepare the 1D and 2D models before the coupled run.

        1. reads the model description dictionaries,
        2. parses the TELEMAC configuration (CFGS),
        3. links/copies restart files when a restart is requested,
        4. builds the 2D working files (split for parallel runs),
        5. rewrites the 2D steering file with the coupling settings.
        """
        # # ~~~ Identification of TELEMAC files ~~~~~
        # # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
        tel_file = "config_{}".format(self.param['nom_mod_2d'][0])

        if tel_file in self.list_dico_mod.keys():
            dico = self.list_dico_mod[tel_file]
        else:
            raise NameError('\nDICO NOT FOUND {}. '
                            'PROGRAM STOP. \n\n'.format(tel_file))
        self.lec_telfile(dico['files'])

        tstart = self.param['startTime']

        # Parse the TELEMAC configuration file (fills the global CFGS)
        # # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
        root_dir = os.path.expandvars('$HOMETEL')
        python_dir = os.path.join(root_dir, 'scripts', 'python3')
        CFGS.parse_cfg_file(self.systelcfg, self.usetelcfg, root_dir,
                            python_dir)
        CFGS.compute_modules_info()
        CFGS.compute_system_info()
        CFGS.compute_partel_info()
        CFGS.compute_mpi_info()

        #
        # # ~~~ Modification of .cas files et config files in function
        # #      to 2D run type  ~~~~~
        # # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
        if self.param['Run_type_2D'].lower() == 'sequentiel':
            self.param['nb_proc_2D'] = 1

        # # ~~~ Creating symbolic link for initial water line if restart ~~~~~
        # # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
        folder_cas = os.path.dirname(self.cas_file)

        if self.param['reprise']:
            # The initial water line for time tstart must already exist
            file_tmp = os.path.join(folder_cas,
                                    'WaterLineInit_{}.slf'.format(tstart))
            if not os.path.exists(file_tmp):
                raise Exception("ERROR. INTIAL WATER LINE"
                                "OF 2D MODEL IS MISSED."
                                " STOP.")
            else:
                # Refresh the WaterLineInit_in.slf link to that file
                path_tmp2 = os.path.join(folder_cas, "WaterLineInit_in.slf")
                if os.path.exists(path_tmp2):
                    os.remove(path_tmp2)
                path_ori = 'WaterLineInit_{}.slf'.format(tstart)
                os.symlink(path_ori, path_tmp2)

        # Copy available 2D boundary-condition restart files to EXEC
        i_b = 0
        while i_b < self.param['nb_mod_2d']:
            file_tmp = os.path.join(
                folder_cas,
                'bc2D_restart_{}_{}.json'.format(self.param['nom_mod_2d'][i_b],
                                                 tstart))
            if os.path.exists(file_tmp):
                copy2(file_tmp, '.')
            i_b += 1

        # print("~~~~~ CREATION TEMPORARY DATA FILE OF TELEMAC ~~~~~")
        os.chdir(os.path.realpath(os.path.dirname(self.cas_file)))
        my_study = Study(os.path.basename(self.cas_file), 'telemac2d',
                         self.cas_file_tmp)
        os.chdir(self.cas_file_tmp)
        self.dico_file = my_study.dico_file
        # Build the working dir and the partitioned (split) input files
        my_study.fill_working_dir()
        my_study.ncsize = self.param['nb_proc_2D']
        my_study.generate_mpi_files()

        #
        # ~~~ Test the consistence between the last time
        # of restart file and specific time ~~~~
        # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
        if self.param['reprise']:
            res = TelemacFile('T2DPRE')
            last_time_t2dpre = float(res.times[-1])
            res.close()
            if last_time_t2dpre != tstart:
                raise Exception(
                    "\nERROR, THE RESTART TIME OF PARAMETERS FILES {} "
                    "AND OF RESTART {} AREN'T THE SAME."
                    "PROGRAM STOP.\n\n".format(tstart, last_time_t2dpre))

        # # ~~~ Check data of Mascaret  ~~~~~
        # # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
        # Register the file description of every configured 1D model
        i_b = 0
        while i_b < self.param['nb_mod_1d']:
            name_dict = "config_{}".format(self.param['nom_mod_1d'][i_b])
            if name_dict in self.list_dico_mod.keys():
                dico = self.list_dico_mod[name_dict]
                self.dico_file_model1d[self.param['nom_mod_1d'][i_b]] = \
                    dico['files']
            i_b += 1
        #
        #
        # # ~~~ Modification .cas file from coupling data ~~~~~
        # # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
        #
        # # ~~~ 2D.cas  file~~~~~
        # Output period expressed in 2D time steps
        periode_sorties = \
            int(self.param['freq_res'] * self.param['dt_couplage']
                / self.param['dt_2d'])
        filecas = os.path.join(self.cas_file_tmp, 'T2DCAS')
        cas = TelemacCas(filecas, self.dico_file, check_files=False)

        # Point the steering file at the local working-dir file names
        cas.set('TIME STEP', self.param['dt_2d'])
        cas.set('GRAPHIC PRINTOUT PERIOD', periode_sorties)
        cas.set('LISTING PRINTOUT PERIOD', periode_sorties)
        cas.set('GEOMETRY FILE', 'T2DGEO')
        cas.set('BOUNDARY CONDITIONS FILE', 'T2DCLI')
        cas.set('RESULTS FILE', 'T2DRES')
        if 'LIQUID BOUNDARIES FILE' in cas.values.keys():
            cas.set('LIQUID BOUNDARIES FILE', 'T2DIMP')

        if 'PREVIOUS COMPUTATION FILE' in cas.values.keys():
            cas.set('PREVIOUS COMPUTATION FILE', 'T2DPRE')

        if self.param['reprise']:
            cas.set('COMPUTATION CONTINUED', True)
        else:
            cas.set('COMPUTATION CONTINUED', False)

        cas.write(filecas)
コード例 #13
0
def main():
    """ Main function of manip_cas.py """
    # ~~ Command line definition ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    print('\n\nLoading Options and Configurations\n' + 72 * '~' + '\n')
    parser = argparse.ArgumentParser(description='Translate a keyword')
    parser = add_config_argument(parser)
    parser.add_argument(
        "module",
        choices=[
            'postel3d', 'telemac2d', 'telemac3d', 'tomawac',
            'artemis', 'sisyphe', 'waqtel', 'khione', 'stbtel'
        ],
        help="Name of the module for which to translate")
    parser.add_argument(
        "-t", "--translate",
        action="store_true", dest="translate", default=False,
        help="Generate a french and english version of the steering file "
        "(cas_file suffixed with _fr and _gb)")
    parser.add_argument(
        "-s", "--sort",
        action="store_true", dest="sort", default=False,
        help="Rewrites the steering file using rubriques to sort the keywords "
        "cas_file suffixed with _sorted")
    parser.add_argument(
        "--keep-comments",
        action="store_true", dest="keep_comments", default=False,
        help="When sorting will append all original comments "
        "at the end of the file")
    parser.add_argument("cas_file", help="Name of the steering file to read")
    args = parser.parse_args()

    # ~~ Environment ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    update_config(args)
    cfg = CFGS.configs[CFGS.cfgname]
    CFGS.compute_execution_info()

    # ~~ Locate the dictionary of the requested module ~~~~~~~~~~~~~~~~
    dico_file = path.join(cfg['MODULES'][args.module]['path'],
                          args.module + '.dico')
    if not path.exists(dico_file):
        raise TelemacException(
            'Could not find the dictionary file: {}'.format(dico_file))
    cas = TelemacCas(args.cas_file, dico_file, check_files=False)

    # Validate the steering file, then apply the requested actions
    check_cas(cas)
    if args.translate:
        translate(cas)
    if args.sort:
        sort(cas, args.keep_comments)

    print('\n\nMy work is done\n\n')
    sys.exit(0)
コード例 #14
0
def pre_api(my_vnv_study):
    """
    Duplicate each study of a validation case as an "<name>_api" run that
    executes template.py (api double-run) in its own working directory.

    For every study: the working directory is duplicated, and when a coupled
    steering file shares an input file with the main steering file, that file
    is copied under a module-suffixed name and the coupled steering file is
    rewritten to point at the copy (the api has no temporary run folder, so
    both modules would otherwise open the very same file).

    @param my_vnv_study (vnv_study) The study in which to add api runs
    """

    # Temporarily mark the 'pre' action as not-done while we extend it;
    # its accumulated duration is restored (plus our own time) at the end.
    _, old_time = my_vnv_study.action_time['pre']
    my_vnv_study.action_time['pre'] = [False, 0.0]
    start_time = time.time()

    for name, study in my_vnv_study.studies.items():
        api_name = name + "_api"

        # Build a dedicated working directory for the api twin of the study
        # and copy every input file (including the steering file) into it.
        api_vnv_working_dir = my_vnv_study.build_vnv_working_dir(api_name)

        study.copy_files(api_vnv_working_dir,
                         verbose=my_vnv_study.options.verbose,
                         copy_cas_file=True)

        cmd = "cd {wdir} && mpirun -n {ncsize} template.py {module} {cas} --double-run"\
                .format(wdir=api_vnv_working_dir,
                        ncsize=max(1, study.ncsize),
                        module=study.code_name,
                        cas=path.basename(study.steering_file))

        # Input files declared by the main steering file; any coupled steering
        # file referencing one of these must be redirected to its own copy.
        cas = study.cas
        in_files = [cas.get(key) for key in cas.in_files]

        pwd = getcwd()

        chdir(api_vnv_working_dir)
        # Ensure the working directory is restored even if patching fails.
        try:
            for mod, tmp_cpl_cas in study.cpl_cases.items():
                # Re-read the coupled steering file from the api directory.
                cpl_cas = TelemacCas(path.basename(tmp_cpl_cas.file_name),
                                     get_dico(mod))
                # Reset per coupled module: only rewrite the steering files
                # that were actually modified (a single flag shared across
                # modules would rewrite every file after the first change).
                modified = False
                for key in cpl_cas.in_files:
                    ffile = cpl_cas.get(key)
                    if ffile in in_files:
                        # Shared with the main steering file: duplicate it
                        # under a module-suffixed name and point the coupled
                        # steering file at the duplicate.
                        root, ext = path.splitext(ffile)
                        new_file = root + '_' + mod + ext
                        print(" ~> Copying {} -> {}".format(ffile, new_file))
                        # Copying file
                        shutil.copy2(path.join(api_vnv_working_dir, ffile),
                                     path.join(api_vnv_working_dir, new_file))
                        print(" ~> Modifying in {}: {}".format(mod, key))
                        # Changing value in steering file
                        cpl_cas.set(key, new_file)

                        modified = True
                # If we made some modification overwritting the steering case
                if modified:
                    cpl_cas.write(path.join(api_vnv_working_dir,
                                            cpl_cas.file_name))
                del cpl_cas
        finally:
            chdir(pwd)

        my_vnv_study.add_command(api_name, cmd)

    end_time = time.time()
    # Updating action_time information
    my_vnv_study.action_time['pre'] = [True, old_time + end_time - start_time]