Example #1
    def write_fr_gb(self, output_dir=''):
        """
        Write an English and a French version of the steering file

        @param output_dir (string) If given, the translated steering files
        are written into it; otherwise they are written next to the original
        steering file
        """
        with open(self.file_name, 'r') as f:
            cas_lines = f.readlines()

        core = []
        for cas_line in cas_lines:
            # ~~> scan through to remove all comments
            cas_line = cas_line.replace('"""', "'''").replace('"', "'")
            proc = re.match(KEY_COMMENT, cas_line + '/')
            head = proc.group('before').strip()
            core.append(head)
        cas_stream = ' '.join(core)

        fr_lines = []
        gb_lines = []
        for cas_line in cas_lines:

            # ~~> split comments
            cas_line = cas_line.replace('"""', "'''").replace('"', "'")
            proc = re.match(KEY_COMMENT, cas_line)
            head = proc.group('before').strip()
            # /!\ the tail (comment part) is not translated
            tail = proc.group('after').strip()
            # ~~ special keys starting with '&'
            proc = re.match(KEY_NONE, head + ' ')
            if proc:
                head = ''
                tail = cas_line.strip()
            frline = head
            gbline = head

            # ~~> needed to handle multiple keywords on one line
            cas_stream = cas_line
            while cas_stream != '':
                proc = re.match(KEY_EQUALS, cas_stream)
                if not proc:
                    raise TelemacException('Unhandled error\n    '
                                           'around there :' + cas_stream[:100])
                keyword = proc.group('key').strip()
                if keyword not in head:
                    break  # move on to next line
                # If it is just a value, skip
                if '=' not in head and ':' not in head:
                    break  # move on to next line
                # If it is only part of a string value, skip
                if (head.count("'") == 1 and (
                    "L'" not in keyword and "D'" not in keyword)) or \
                        head.count('"') == 1:
                    break

                # ~~> translate the keyword
                head = head.replace(keyword, '', 1)
                if keyword.upper() in self.dico.gb2fr:
                    frline = frline.replace(keyword, self.dico.gb2fr[keyword],
                                            1)
                if keyword.upper() in self.dico.fr2gb:
                    gbline = gbline.replace(keyword, self.dico.fr2gb[keyword],
                                            1)

                # ~~> look for less obvious keywords
                cas_stream = proc.group('after')  # still hold the separator
                proc = re.match(VAL_EQUALS, cas_stream)
                if not proc:
                    raise Exception('No value to keyword: ' + keyword)
                while proc:
                    cas_stream = proc.group('after')
                    proc = re.match(VAL_EQUALS, cas_stream)

            # final append
            fr_lines.append(frline + tail)
            gb_lines.append(gbline + tail)

        # ~~ print FR and GB versions of the CAS file
        if output_dir != '':
            file_name = path.join(output_dir, path.basename(self.file_name))
        else:
            file_name = self.file_name
        base, ext = path.splitext(file_name)
        fr_name = base + "_fr" + ext
        gb_name = base + "_en" + ext
        with open(fr_name, 'w') as f:
            f.write('\n'.join(fr_lines))
        with open(gb_name, 'w') as f:
            f.write('\n'.join(gb_lines))
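# Usage sketch (hedged): assuming `cas` is an instance of the steering-file
# class that defines write_fr_gb above, the call below writes, for a steering
# file t2d.cas, the translated files t2d_fr.cas and t2d_en.cas into the given
# directory (path hypothetical):
#
#     cas.write_fr_gb(output_dir='translated')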
def parse_array_frame(string, size=-1):
    """
    @brief     Decode the frame structure, all in order
       The list of frames is delimited either by ',' or ';',
          and the ranges use ':'
       The output is an array [..]. Each term is either:
          (a) an integer, representing a frame or a node or a plane for instance
          (b) a 1D-tuple of a real value, representing a time or a depth
          (c) a 3D-tuple of integers, representing an array range [0;-1;1] by
              default
    @examples of input / output
       '5'         =>  [5]
       '[4]'       =>  [4]
       '[5,6,7,0]' =>  [5, 6, 7, 0]
       '(5.6)'     =>  [(5.6,)]
       '(76);(4),[(3.3);4:14:2;0:6;8]'
                   =>  [(76.0,), (4.0,), (3.3,), (4, 14, 2), (0, 6, 1), 8]

    """
    frames = []

    # ~~ Special case covering all times ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    if string == '[]':
        if size >= 0:
            return [range(size)]
        else:
            return [[0, -1, 1]]

    # ~~ Identify individual frames ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    for node in re.findall(SIMPLE, string + ','):

        # ~~> Is it a time (itime) or a frame / range
        itime = node[1]
        proci = re.match(RNG2D, itime)
        procr = re.match(PNT2D, itime)
        if proci:
            r_t = proci.group('n').split(':')
            if len(r_t) == 1:
                frame_a = [int(r_t[0])]
                if size >= 0:
                    if frame_a[0] < 0:
                        frame_a[0] = max(0, size + frame_a[0])
                    else:
                        frame_a[0] = min(frame_a[0], size - 1)
                    frame_a = range(frame_a[0], frame_a[0] + 1, 1)
            else:
                if len(r_t) == 2:
                    frame_a = [int(r_t[0]), int(r_t[1]), 1]
                if len(r_t) == 3:
                    frame_a = [int(r_t[0]), int(r_t[1]), int(r_t[2])]
                if size >= 0:
                    if frame_a[0] < 0:
                        frame_a[0] = max(0, size + frame_a[0])
                    else:
                        frame_a[0] = min(frame_a[0], size - 1)
                    if frame_a[1] < 0:
                        frame_a[1] = max(0, size + frame_a[1])
                    else:
                        frame_a[1] = min(frame_a[1], size - 1)
                    frame_a = range(frame_a[0], frame_a[1] + 1, frame_a[2])
        elif procr:
            frame_a = (float(procr.group('n')), )
        else:
            raise TelemacException(\
                    '... could not parse the point <{}>'
                    ' from the string "{}"'.format(node[0], string))

        # ~~> Final packing
        frames.extend(frame_a)

    return frames
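# Usage sketch for parse_array_frame; the import path is hypothetical and the
# expected results are taken from the @examples in the docstring above:
#
#     from utils.parser_strings import parse_array_frame  # path assumed
#     parse_array_frame('5')          # [5]
#     parse_array_frame('(5.6)')      # [(5.6,)]
#     parse_array_frame('[]', 10)     # [range(0, 10)] when a size is known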
def check_python_rank_tags(py_file, options):
    """
    Checks if a Python vnv script matches the rank and tags options
    """
    val_dir = path.dirname(py_file)

    chdir(val_dir)

    # Importing vnv_class from py_file
    try:
        # Code for Python 3.5+
        import importlib.util
        # This allows Python scripts declared in the example folder to be loaded
        sys.path.append(val_dir)
        spec = importlib.util.spec_from_file_location("vnv_module", py_file)
        vnv_stuff = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(vnv_stuff)
    except:
        from importlib.machinery import SourceFileLoader

        vnv_stuff = SourceFileLoader("vnv_module", py_file).load_module()

    name = path.splitext(py_file)[0]
    my_vnv_study = vnv_stuff.VnvStudy(name, val_dir, options)

    # Checking ranks: will run all scripts with rank <= options.rank
    rank = my_vnv_study.rank
    rank_ok = rank <= options.rank

    if not rank_ok and options.verbose:
        print('\n ~> ' + py_file)
        print('     > nothing to do here (rank):')
        print('       {} > {}: '.format(rank, options.rank))

    # Checking tags: will run all the tests containing one of the tags in
    # options.tags, and skip the ones with a '-' before them (-mascaret will
    # skip files with the mascaret tag)
    tags = my_vnv_study.tags
    if tags != []:
        tags_ok = False
        opt_tags = options.tags.split(',')

        if '+' in options.tags:
            for opt_tag in opt_tags:
                if '+' in opt_tag:
                    # All the tags in opt_tag must be in tags
                    tag_ok = True
                    for tag2 in opt_tag.split('+'):
                        tag_ok = tag_ok and (tag2 in tags)
                elif '-' in opt_tag:
                    # '-' means reverse: the tag must not be in tags
                    tag_ok = opt_tag[1:] not in tags
                else:
                    tag_ok = opt_tag in tags
                tags_ok = tags_ok or tag_ok
        else:
            for tag in tags:
                # If '-tag' is given, the Python script should not be run
                if '-' + tag in opt_tags:
                    tags_ok = False
                    break
                tag_ok = tag in opt_tags
                # Checking that at least one of the tags is in opt_tags
                tags_ok = tags_ok or tag_ok
    else:
        raise TelemacException("Missing tag in Python file:\n" + py_file)

    if not tags_ok and options.verbose:
        print('\n ~> ' + py_file)
        print('     > nothing to do here (tag):')
        print('       File tags: {}'.format(','.join(tags)))
        print('       Options tags: {}'.format(options.tags))

    # Cleaning up sys.path
    sys.path.remove(val_dir)

    return tags_ok and rank_ok
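# A standalone sketch of the tag-matching rules implemented above, for
# clarity (this is an illustration, not the project's own helper):

def tags_match(file_tags, opt_tags_str):
    """'a+b' requires both tags, '-a' excludes a, plain 'a' requires a;
    the file is selected as soon as one option entry matches."""
    opt_tags = opt_tags_str.split(',')
    if '+' in opt_tags_str:
        ok = False
        for opt_tag in opt_tags:
            if '+' in opt_tag:
                ok = ok or all(t in file_tags for t in opt_tag.split('+'))
            elif opt_tag.startswith('-'):
                ok = ok or opt_tag[1:] not in file_tags
            else:
                ok = ok or opt_tag in file_tags
        return ok
    for tag in file_tags:
        if '-' + tag in opt_tags:
            return False
    return any(tag in opt_tags for tag in file_tags)

assert tags_match(['telemac2d', 'fv'], 'telemac2d+fv')
assert not tags_match(['mascaret'], '-mascaret,telemac2d')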
Example #4
def to_latlon(easting,
              northing,
              zone_number,
              zone_letter=None,
              northern=None,
              strict=True):
    """
    This function converts a UTM coordinate into latitude and longitude

    @param easting (int)
            Easting value of UTM coordinate

    @param northing (int)
            Northing value of UTM coordinate

    @param zone_number (int)
            Zone number, as shown on a global map of UTM zone numbers.
            For more information see utmzones [1]_

    @param zone_letter (str)
            Zone letter, given as a string. The UTM zone designators
            can be found in [1]_

    @param northern (bool)
            Set to True for the northern hemisphere and False for the
            southern one. Default is None

    @param strict (bool)
            If True, check that easting and northing are within the valid
            UTM ranges


       .. _[1]: http://www.jaworski.ca/utmzones.htm

    @returns (np.array, np.array) longitude, latitude
    """
    if not zone_letter and northern is None:
        raise ValueError('either zone_letter or northern needs to be set')

    if zone_letter and northern is not None:
        raise ValueError('set either zone_letter or northern, but not both')

    if strict:
        if not in_bounds(easting, 100000, 1000000, upper_strict=True):
            raise TelemacException(
                'easting out of range (must be between 100,000 m and 999,999 m)')
        if not in_bounds(northing, 0, 10000000):
            raise TelemacException(
                'northing out of range (must be between 0 m and 10,000,000 m)')

    check_valid_zone(zone_number, zone_letter)

    if zone_letter:
        zone_letter = zone_letter.upper()
        northern = (zone_letter >= 'N')

    x = easting - 500000
    y = northing

    if not northern:
        y -= 10000000

    m = y / K0
    m_u = m / (R * M1)

    p_rad = (m_u + P2 * np.sin(2 * m_u) + P3 * np.sin(4 * m_u) +
             P4 * np.sin(6 * m_u) + P5 * np.sin(8 * m_u))

    p_sin = np.sin(p_rad)
    p_sin2 = p_sin * p_sin

    p_cos = np.cos(p_rad)

    p_tan = p_sin / p_cos
    p_tan2 = p_tan * p_tan
    p_tan4 = p_tan2 * p_tan2

    ep_sin = 1 - E * p_sin2
    ep_sin_sqrt = np.sqrt(1 - E * p_sin2)

    n = R / ep_sin_sqrt
    r_0 = (1 - E) / ep_sin

    c_0 = _E * p_cos**2
    c_2 = c_0 * c_0

    d_0 = x / (n * K0)
    d_2 = d_0 * d_0
    d_3 = d_2 * d_0
    d_4 = d_3 * d_0
    d_5 = d_4 * d_0
    d_6 = d_5 * d_0

    latitude = (
        p_rad - (p_tan / r_0) *
        (d_2 / 2 - d_4 / 24 *
         (5 + 3 * p_tan2 + 10 * c_0 - 4 * c_2 - 9 * E_P2)) + d_6 / 720 *
        (61 + 90 * p_tan2 + 298 * c_0 + 45 * p_tan4 - 252 * E_P2 - 3 * c_2))

    longitude = (
        d_0 - d_3 / 6 * (1 + 2 * p_tan2 + c_0) + d_5 / 120 *
        (5 - 2 * c_0 + 28 * p_tan2 - 3 * c_2 + 8 * E_P2 + 24 * p_tan4)) / p_cos

    return (np.degrees(longitude) +
            zone_number_to_central_longitude(zone_number),
            np.degrees(latitude))
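# Usage sketch, assuming the WGS84 constants (K0, R, E, E_P2, ...) and the
# helpers in_bounds / check_valid_zone / zone_number_to_central_longitude are
# defined at module level as the code above expects. Note the return order is
# (longitude, latitude), both in degrees:
#
#     import numpy as np
#     lon, lat = to_latlon(np.array([340000.0]), np.array([5710000.0]),
#                          zone_number=32, zone_letter='U')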
def parse_array_point(string, size=-1):
    """
    @brief     Decode the point structure, all in order
       The list of points is delimited either by ',' or ';',
          and the ranges use ':'
       The output is an array [..]. Each term is more involved ...
    @examples of input / output
       # either a 2D node value or a vertical 1D profile covering all planes
       # above the 2D node
       '5'  =>  [(5, [(0, -1, 1)])]
       '(5)' =>  [(5, [(0, -1, 1)])]
       '9@2,58#3,18,4#1,4#1,76@0.,8@0.5'
          =>  [(9, ([2.0, -1],)),
               (58, [3]),
               (18, [(0, -1, 1)]),
               (4, [1]),
               (4, [1]),
               (76, ([0.0, -1],)),
               (8, ([0.5, -1],))]
       '(4,5,6),[]#900'
          =>  [((4.0, 5.0, 6.0), [(0, -1, 1)]),
               ([], [900])]
       '(3;4,5)#[]'
          =>  [(3, [(0, -1, 1)]),
               (4, [(0, -1, 1)]),
               (5, [(0, -1, 1)])]
       '(4;5,6)#[5:4;6;0:-1:2]'
          =>  [((4.0, 5.0, 6.0), [(5, 4, 1), 6, (0, -1, 2)])]
       '9@2,58#3,18,(4;7)#1,4#1,(76;4)@1.e-1,8@(0.5;0.7)'
          =>  [(9, ([2.0, -1],)),
               (58, [3]),
               (18, [(0, -1, 1)]),
               ((4.0, 7.0), [1]),
               (4, [1]),
               ((76.0, 4.0), ([0.1, -1],)),
               (8, ([0.5, -1],[0.7, -1]))]
       '(4;5,6)#[5;6]'
          =>  [((4.0, 5.0, 6.0), [5, 6])]
       '(4;5,6)@(-5#3;6)'
          =>  [((4.0, 5.0, 6.0), ([-5.0, 3], [6.0, -1]))]
    """

    points = []

    # ~~ Special deal of all points ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    if string == '':
        if size >= 0:
            return [([], range(size))]
        else:
            return [([], [0])]

    # ~~ Identify individual points ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    for node in re.findall(COMPLX, string + ','):

        # ~~> Is it a point (x,y) or a node n
        x_y = node[1]
        proci = re.match(NOD2D, x_y)
        procs = re.match(SPL2D, x_y)
        procr = re.match(PNT2D, x_y)
        proce = re.match(EMPTY, x_y)
        if proci:
            point_a = int(proci.group('n'))
        elif procr:
            x_y = procr.group('n').replace(',', ';').split(';')
            if len(x_y) == 2:
                point_a = (float(x_y[0]), float(x_y[1]))
            if len(x_y) != 2:
                raise TelemacException(\
                        '... we are not allowing anything '
                        'but a pair (x,y). You have: <{}> '
                        'from the string "{}"\n'
                        '+> if you need (x,y,z) you should use a depth '
                        'above plan 0: (x,y)@z#0'.format(node[0], string))
        elif proce:
            point_a = []
        elif procs:
            x_y = procs.group('n').replace(',', ';').split(';')
            if len(x_y) == 2:
                point_a = (int(x_y[0]), int(x_y[1]))
            elif len(x_y) == 3:
                point_a = (int(x_y[0]), int(x_y[1]), int(x_y[2]))
            else:
                raise TelemacException(\
                  '... could not parse the number of re-sampling steps. '
                  'You have: <{}> from the string "{}"'.format(node[0], string))

            points.append(point_a)
            continue
        else:
            raise TelemacException(\
                    '... could not parse the point <{}> '
                    'from the string "{}"'.format(node[0], string))

        # ~~> Is it a depth d or a plane p or both
        point_b = []
        if node[2] != '':
            t_p = node[2][0]
            z_p = node[2][1:]
            if t_p == '#':  # this is a plane or a series of planes
                proci = re.match(RNG2D, z_p)
                if proci:
                    z_p = '[' + z_p + ']'
                point_b = parse_array_frame(z_p, size)
            # this is a depth or a series of depths, referenced by planes
            if t_p == '@':
                procr = re.match(NUMBR, z_p)
                if procr:
                    z_p = '(' + z_p + ')'
                procp = re.match(PNT2D, z_p)
                if procp:
                    point_b = []
                    for pnt in procp.group('n').replace(',', ';').split(';'):
                        if '#' in pnt:
                            tmp_a, tmp_b = pnt.split('#')
                            point_b.append([float(tmp_a), int(tmp_b)])
                        else:
                            # from the surface plane by default
                            point_b.append([float(pnt), -1])
                    point_b = tuple(point_b)
        else:
            if size >= 0:
                point_b = range(size)
            else:
                point_b = [0, -1, 1]

        # ~~> Final packing
        points.append((point_a, point_b))

    return points
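# Usage sketch for parse_array_point; the import path is hypothetical and
# the expected results are taken from the @examples in the docstring above:
#
#     from utils.parser_strings import parse_array_point  # path assumed
#     parse_array_point('5')             # [(5, [(0, -1, 1)])]
#     parse_array_point('(4,5,6),[]#900')
#     # => [((4.0, 5.0, 6.0), [(0, -1, 1)]), ([], [900])]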
def to_lat_long(easting, northing, zone):
    """
    Convert UTM easting / northing (numpy arrays) for a given zone
    into longitude / latitude in degrees (vectorised variant)

    @param easting (np.array) Easting values of the UTM coordinates
    @param northing (np.array) Northing values of the UTM coordinates
    @param zone (int) UTM zone number

    @returns (np.array, np.array) longitude, latitude
    """
    # ~~> easting
    easting = easting - 500000
    mineast = np.min(easting)
    maxeast = np.max(easting)
    if mineast < -1000000 or maxeast > 1000000:
        raise TelemacException(\
                '... Easting out of range '
                '(must be between 100 km and 1000 km)')

    maxnord = np.max(northing)
    if maxnord < 0:
        northing = northing - 10000000  # South and North
    minnord = np.min(northing)
    maxnord = np.max(northing)
    if minnord < 0 or maxnord > 10000000:
        raise TelemacException(\
                '... Northing out of range '
                '(must be between 0 m and 10.000.000 m)')

    # ~~> zoning
    if zone < 1 or zone > 60:
        raise TelemacException(\
                '... Zone number out of range '
                '(must be between 1 and 60)')

    m = northing / K0
    mu1 = m / (R * M1)

    p_rad = (mu1 + P2 * np.sin(2 * mu1) + P3 * np.sin(4 * mu1) +
             P4 * np.sin(6 * mu1))

    p_sin = np.sin(p_rad)
    p_sin2 = np.power(p_sin, 2)

    p_cos = np.cos(p_rad)

    p_tan = np.divide(p_sin, p_cos)
    p_tan2 = np.power(p_tan, 2)
    p_tan4 = np.power(p_tan, 4)

    ep_sin = 1 - E * p_sin2
    ep_sin_sqrt = np.power((1 - E * p_sin2), -0.5)

    n = np.power((R * ep_sin_sqrt * K0), -1)
    tmp = (ep_sin) / (1 - E)

    c_1 = _E * np.power(p_cos, 2)
    c_2 = np.power(c_1, 2)

    d_1 = np.multiply(easting, n)
    d_2 = np.power(d_1, 2)
    d_3 = np.power(d_1, 3)
    d_4 = np.power(d_1, 4)
    d_5 = np.power(d_1, 5)
    d_6 = np.power(d_1, 6)

    latitude = (p_rad - np.multiply(\
              np.multiply(p_tan, tmp), \
              (d_2/2 - d_4/24 * (5+3*p_tan2+10*c_1-4*c_2-9*E_P2))) + \
        d_6/720 * (61+90*p_tan2+298*c_1+45*p_tan4-252*E_P2-3*c_2))
    latitude = np.degrees(latitude)

    longitude = np.divide(\
            (d_1 - np.multiply(d_3, (1+2*p_tan2+c_1))/6 + \
             np.multiply(d_5, (5-2*c_1+28*p_tan2-3*c_2+8*E_P2+24*p_tan4))/120),
            p_cos)
    longitude = np.degrees(longitude) + ((zone - 1) * 6 - 180 + 3)

    return longitude, latitude
Example #7
    def __init__(self, steering_file, code_name, working_dir):
        """
        Init function

        @param steering_file (string) Name of the steering file to run
        @param code_name (string) Name of the module used
        @param working_dir (string) If not empty will be the name of the
                                         working directory
        """
        if not path.exists(steering_file):
            raise TelemacException(
                "Could not find your steering file :\n{}".format(
                    steering_file))
        self.steering_file = steering_file
        self.case_dir = path.dirname(path.realpath(self.steering_file))
        self.working_dir = ''
        self.code_name = code_name
        self.sortie_file = ''
        self.exe_name = ''
        self.run_cmd = ''
        self.mpi_cmd = ''
        self.par_cmd = ''

        # Getting configuration information
        self.cfgname = CFGS.cfgname
        self.cfg = CFGS.configs[CFGS.cfgname]

        # Special behaviour for mascaret, the treatment below is not needed
        if self.code_name == 'mascaret':
            # Mascaret
            self.working_dir = self.case_dir
            self.bin_path = path.join(self.cfg['root'], 'builds', self.cfgname,
                                      'bin')
            ext = self.cfg['SYSTEM']['sfx_exe']
            self.exe_name = path.join(self.bin_path, self.code_name + ext)
            self.run_cmd = self.exe_name
            # mascaret only runs in sequential
            self.ncsize = 1
            return

        # Searching for the dictionary associated with the steering case
        self.dico_file = path.join(self.cfg['MODULES'][self.code_name]['path'],
                                   self.code_name + '.dico')
        if not path.exists(self.dico_file):
            raise StudyException(
                self, 'Could not find the dictionary file: {}'.format(
                    self.dico_file))

        # ~~> processing the steering file
        self.cas = TelemacCas(self.steering_file, self.dico_file)

        # parsing information for the coupled module steering files
        cplages = self.cas.get('COUPLING WITH', '').split(',')

        self.ncnode = 1
        self.nctile = 1
        self.ncsize = self.cas.get('PARALLEL PROCESSORS', default=1)

        self.lang = self.cas.lang

        self.cpl_cases = {}

        # /!\ having done the loop this way it will not check for DELWAQ
        cpl_codes = []
        for cplage in cplages:
            for mod in self.cfg['MODULES']:
                if mod in cplage.lower():
                    cpl_codes.append(mod)

        for code in cpl_codes:
            # ~~~~ Extract the CAS File name ~~~~~~~~~~~~~~~~~~~~~~~
            cas_name_cpl = self.cas.get(code.upper() + ' STEERING FILE')
            cas_name_cpl = path.join(self.case_dir, cas_name_cpl)

            if not path.isfile(cas_name_cpl):
                raise StudyException(
                    self, 'Missing coupling steering file for ' + code + ': ' +
                    cas_name_cpl)

            # ~~ Read the coupled CAS File ~~~~~~~~~~~~~~~~~~~~~~~~~
            dico_file_plage = path.join(self.cfg['MODULES'][code]['path'],
                                        code + '.dico')
            cas_plage = TelemacCas(cas_name_cpl, dico_file_plage)

            self.cpl_cases[code] = cas_plage

        # ~~> structural assumptions
        self.bin_path = path.join(self.cfg['root'], 'builds', self.cfgname,
                                  'bin')
        self.obj_path = self.cfg['MODULES'][self.code_name]['path'].replace(
            path.join(self.cfg['root'], 'sources'),
            path.join(self.cfg['root'], 'builds', self.cfgname, 'obj'))
        self.lib_path = path.join(self.cfg['root'], 'builds', self.cfgname,
                                  'lib')

        self.set_working_dir(working_dir)
        self.set_exe()
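# Hypothetical instantiation sketch: the class above is assumed to be the
# Study class used by these scripts, and CFGS must already hold a parsed
# configuration (see update_config in the other examples) before the call,
# since __init__ reads CFGS.cfgname and CFGS.configs. The file name is
# made up:
#
#     study = Study('t2d_example.cas', 'telemac2d', working_dir='')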
Example #8
def main():
    """
    Main program for the compilation of the documentation of
    the telemac-mascaret system
    """
    # <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
    # ~~ Reads config file ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    print('\n\nLoading Options and Configurations\n' + '~' * 72 + '\n')
    parser = ArgumentParser(formatter_class=RawDescriptionHelpFormatter,
                            description=('''\n
By default all the documentation is generated\n
use the options --validation/reference/user/release/theory to compile only one
        '''))
    parser = add_config_argument(parser)
    parser.add_argument("-v",
                        "--verbose",
                        action="store_true",
                        dest="verbose",
                        default=False,
                        help="Will display listing for all commands")
    parser.add_argument(
        "-m",
        "--modules",
        dest="modules",
        default='',
        help="specify the list modules (, separated), default is all of them "
        + "from {" + ",".join(MODULE_LIST) + "}")
    parser.add_argument(
        "-M",
        "--misc",
        dest="misc",
        default='',
        help="specify the list of misc documentation (, separated) to compile, "
        "default is all of them " + "from {" + ",".join(MISC_LIST) + "}")
    parser.add_argument("--validation",
                        action="store_true",
                        dest="validation",
                        default=False,
                        help="Will generate the validation documentation")
    parser.add_argument(
        "--case-list",
        dest="case_list",
        default='',
        help="List of cas to include in the validation documentation"
        "separated by ',' (default all of them)")
    parser.add_argument("--reference",
                        action="store_true",
                        dest="reference",
                        default=False,
                        help="Will generate the reference documentation")
    parser.add_argument("--user",
                        action="store_true",
                        dest="user",
                        default=False,
                        help="Will generate the user documentation")
    parser.add_argument("--release",
                        action="store_true",
                        dest="release_note",
                        default=False,
                        help="Will generate the release note")
    parser.add_argument("--theory",
                        action="store_true",
                        dest="theory_guide",
                        default=False,
                        help="Will generate the theory guide")
    parser.add_argument(
        "--clean",
        action="store_true",
        dest="cleanup",
        default=False,
        help="Will remove all temporary file generated by pdflatex")
    parser.add_argument("--fullclean",
                        action="store_true",
                        dest="fullcleanup",
                        default=False,
                        help="Same as clean but removes the pdf as well")

    # <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
    # ~~~~ Environment ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    options = parser.parse_args()
    update_config(options)

    # <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
    # ~~~~ Works for all configurations unless specified ~~~~~~~~~~~~~~~
    # <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
    # ~~~~ Compile the validation documentation
    doall = not (options.validation or options.user or options.reference
                 or options.release_note or options.theory_guide)
    cfg = CFGS.configs[CFGS.cfgname]
    # still in lower case
    root = CFGS.get_root()
    # Get what is to be compiled
    # By default everything; if something is defined, compile only that
    if options.modules != '':
        module_list = options.modules.split(',')
        for module in module_list:
            if module not in MODULE_LIST:
                raise TelemacException(
                    "{} is not in list of modules ({})".format(
                        module, ",".join(MODULE_LIST)))
    else:
        # all modules
        module_list = MODULE_LIST
    if options.misc != '':
        misc_list = options.misc.split(',')
        for misc in misc_list:
            if misc not in MISC_LIST:
                raise TelemacException("{} is not in list of misc ({})".format(
                    misc, ",".join(MISC_LIST)))
        module_list = []
    else:
        # all docs
        misc_list = MISC_LIST
        # If a module was specified, or a specific documentation requested,
        # do not compile the Misc documentation
        if options.modules != '' or not doall:
            misc_list = []

    CFGS.compute_vnv_info()

    # Get version from config; if it does not exist, use trunk
    version = cfg.get('version', 'trunk')

    # Initialise output message
    output_mess = '\n\n'
    # Look on all the modules for the documentation
    for code_name in module_list:
        print('\nCompilation of the documentation for ' + code_name + '\n' +
              '~' * 72)
        # list of what to do for the module
        todo = []
        if options.validation or doall:
            if code_name not in ['telapy', 'mascaret', 'coupling']:
                # Building Validation LaTeX file
                doc_dir = path.join(root, 'documentation', code_name,
                                    'validation')
                chdir(doc_dir)
                if options.case_list != '':
                    list_of_case = options.case_list.split(',')
                else:
                    list_of_case = list(cfg['VALIDATION'][code_name].keys())
                    list_of_case.remove('path')
                skiped_case = \
                    create_case_list_file(
                        doc_dir,
                        cfg['VALIDATION'][code_name]['path'],
                        list_of_case,
                        options.cleanup or options.fullcleanup)
                for case in skiped_case:
                    output_mess += r'   - /!\ Missing LaTeX file for ' + \
                                   case+'\n'
                todo.append('validation')
        if options.reference or doall:
            if code_name not in ['telapy', 'mascaret', 'nestor', 'coupling']:
                # Path to the dictionary
                dictionary = path.join(root, 'sources', code_name,
                                       code_name + '.dico')
                # Path to latex File
                latex_file = path.join(root, 'documentation', code_name,
                                       'reference', 'latex', 'Corpus.tex')
                # English only for now
                lng = '2'
                # Path to bin directory
                exe_path = path.join(\
                        root, 'builds', CFGS.cfgname,
                        'bin', 'damocles'+cfg['SYSTEM']['sfx_exe'])
                generate_ref_from_dict(\
                        exe_path, dictionary, latex_file, lng,
                        options.cleanup or options.fullcleanup,
                        options.verbose)
                todo.append('reference')
        if options.user or doall:
            if code_name not in ['mascaret']:
                # Normal Compilation of a LaTeX file
                todo.append('user')
        if options.theory_guide or doall:
            # theory guide only available for telemac3d
            if code_name in ['telemac3d', 'mascaret', 'waqtel']:
                todo.append('theory_guide')
        for doc_type in todo:
            print('\n     ~> Compilation of the {} documentation'\
                  .format(doc_type))
            doc_dir = path.join(root, 'documentation', code_name, doc_type)
            chdir(doc_dir)
            # Check if the file exist
            if path.exists(
                    path.join(doc_dir, code_name + "_" + doc_type + ".tex")):
                if code_name == 'telapy' and \
                   not (options.cleanup or options.fullcleanup):
                    # Running small script to generate list of variables for api
                    generate_list_variables(doc_dir)
                compile_doc(doc_dir, code_name + '_' + doc_type, version,
                            options.cleanup, options.fullcleanup,
                            options.verbose)
            else:
                raise TelemacException(\
                        "   - Error for {} {}, {}.tex "
                        "not found ".format(code_name,
                                            path.basename(doc_dir),
                                            code_name+"_"+doc_type))
            if not (options.cleanup or options.fullcleanup):
                output_mess += '   - Created %s_%s_%s.pdf\n' % \
                              (code_name, doc_type, version)
    # List of the other documentation
    print('\nCompilation of the documentation for Misc' + '\n' + '~' * 72)
    for doc in misc_list:
        print('\n     ~> Compilation of the {} documentation'.format(doc))
        doc_dir = path.join(root, 'documentation', 'Misc', doc)

        if doc == 'notebooks':
            notebook_dir = path.join(root, 'notebooks')
            doc_dir = path.join(root, 'documentation', doc)
            if not (options.fullcleanup or options.cleanup):
                generate_notebook_html(doc_dir, notebook_dir, options.verbose)
        elif doc in ['doxydocs', 'doxypydocs']:
            if not (options.fullcleanup or options.cleanup):
                generate_doxygen(doc, options.verbose)
        else:
            chdir(doc_dir)
            if path.exists(path.join(doc_dir, doc + ".tex")):
                compile_doc(doc_dir, doc, version, options.cleanup,
                            options.fullcleanup, options.verbose)
            else:
                raise TelemacException(\
                        "   - Error in {}, {}.tex "
                        "not found ".format(path.basename(doc_dir), doc))

        if not (options.cleanup or options.fullcleanup):
            if doc not in ['notebooks', 'doxydocs', 'doxypydocs']:
                output_mess += '   - Created %s for %s.pdf\n' % \
                              (doc, version)
            else:
                output_mess += '   - Created %s_%s.pdf\n' % \
                              (doc, version)

    print(output_mess)
    print('\n\n' + '~' * 72)

    # <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
    # ~~~~ Jenkins' success message ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    print('\n\nMy work is done\n\n')

    sys.exit(0)
Example #9
def generate_list_variables(doc_dir):
    """
    Generates in latex format list of variables for each telapy module

    @param doc_dir (str) Path of the documentation
    """
    try:
        import _api
        api = sys.modules['_api']
        api_avail = True
    except ImportError:
        print("Api nor available generating empty list of variable")
        api_avail = False

    list_variable_template = Template(r"""
\begin{longtable}{|p{.40\textwidth} | p{.60\textwidth}|}
\hline
Variable name & Definition \tabularnewline
\hline
\hline
$content
\hline
\caption{Accessible variables through the API for $mod}
\end{longtable}""")

    for module in ['t2d', 't3d', 'sis', 'art', 'wac']:

        content = ''
        if api_avail:
            print(" ~> Getting var for", module)
            # Using direct access to the api (no need to instantiate)
            mod_handle_var = getattr(api, "api_handle_var_" + module)
            nb_var = getattr(mod_handle_var, "nb_var_" + module)
            var_len = getattr(mod_handle_var, module + "_var_len")
            info_len = getattr(mod_handle_var, module + "_info_len")
            get_var_info = getattr(mod_handle_var,
                                   "get_var_info_{}_d".format(module))
            set_var_list = getattr(mod_handle_var,
                                   "set_var_list_{}_d".format(module))

            # Building array of variable info
            error = set_var_list()
            if error != 0:
                raise TelemacException(
                    "Error while setting var list: {}".format(error))

            # Getting info for each variable
            for i in range(nb_var):
                tmp_varname, tmp_varinfo, error = \
                        get_var_info(i+1, var_len, info_len)
                if error != 0:
                    raise TelemacException(
                        "Error when getting info for var {}".format(i + 1))
                varname = b''.join(tmp_varname).decode('utf-8').strip()
                varinfo = b''.join(tmp_varinfo).decode('utf-8').strip()

                # Adding a new line to the array
                content += '\n{} & {}\\tabularnewline'\
                           .format(varname.replace('_', r'\_'),
                                   varinfo.replace('_', r'\_'))

        file_name = path.join(doc_dir, 'latex',
                              'var_list_{}.tex'.format(module))
        with open(file_name, 'w') as ffile:
            ffile.write(
                list_variable_template.substitute(mod=module, content=content))
Example #10
def compiletex(texfile, version, verbose):
    """
    @brief Full procedure for compiling a LaTeX file,
             i.e. pdflatex, bibtex, pdflatex, pdflatex
    @param texfile Name of the main LaTeX file
    @param version Version of the code/documentation
    @param verbose If True, display the pdflatex listing
    """

    if verbose:
        tex_cmd = "pdflatex --jobname={tex}_{version} {tex}.tex"\
                  .format(tex=texfile, version=version)
        bib_cmd = "bibtex {}_{}.aux".format(texfile, version)
    else:
        tex_cmd = \
           "pdflatex --jobname={tex}_{version} {tex}.tex >latex_run.log 2>&1"\
                  .format(tex=texfile, version=version)
        bib_cmd = "bibtex {}_{}.aux >bib_run.log 2>&1".format(texfile, version)

    # First compilation
    mes = Messages(size=10)
    tail, code = mes.run_cmd(tex_cmd, False)

    if code != 0:
        if verbose:
            log = ''
        else:
            log = '\noutput:\n{}'.format(\
                    '\n'.join(get_file_content('latex_run.log')[-20:]))
        raise TelemacException(\
            'Latex compilation failed\n{}\n{}'\
            .format(tail, log))

    # Bibtex compilation
    tail, code = mes.run_cmd(bib_cmd, False)

    # Forcing bibtex to pass (so the check below is effectively disabled)
    code = 0

    if code != 0:
        if verbose:
            log = ''
        else:
            log = '\noutput:\n{}'.format(\
                    '\n'.join(get_file_content('bib_run.log')[-20:]))
        raise TelemacException(\
            'Bibtex compilation failed\n{}\n{}'\
            .format(tail, log))

    # Second compilation
    tail, code = mes.run_cmd(tex_cmd, False)

    if code != 0:
        if verbose:
            log = ''
        else:
            log = '\noutput:\n{}'.format(\
                    '\n'.join(get_file_content('latex_run.log')[-20:]))
        raise TelemacException(\
            'Latex compilation failed\n{}\n{}'\
            .format(tail, log))

    # Third compilation
    tail, code = mes.run_cmd(tex_cmd, False)

    if code != 0:
        if verbose:
            log = ''
        else:
            log = '\noutput:\n{}'.format(\
                    '\n'.join(get_file_content('latex_run.log')[-20:]))
        raise TelemacException(\
            'Latex compilation failed\n{}\n{}'\
            .format(tail, log))
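# Usage sketch (file name hypothetical): run from the directory containing
# my_doc.tex to produce my_doc_v8p4.pdf through the pdflatex / bibtex /
# pdflatex / pdflatex sequence above:
#
#     compiletex('my_doc', 'v8p4', verbose=False)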
Example #11
def generate_notebook_html(doc_dir, notebook_dir, verbose):
    """
    Generate an html layout of the notebooks using jupyter nbconvert,
    then copy the files back into doc_dir

    @param doc_dir (string) Path to the folder that will contain the html
                            version of the documentation
    @param notebook_dir (string) Path to the notebooks
    @param verbose (bool) If True more verbose
    """
    # Creating doc folder if necessary
    if not path.exists(doc_dir):
        mkdir(doc_dir)

    # Running conversion in the notebook folder
    # Gathering all html files
    for root, subdirs, files in walk(notebook_dir):
        # Creating subfolders in advance
        for subdir in subdirs:
            if ".ipynb_checkpoint" in root:
                continue
            out_dir = path.join(doc_dir + root.replace(notebook_dir, ''),
                                subdir)
            if not path.exists(out_dir):
                mkdir(out_dir)
        for ffile in files:
            if ffile.endswith("ipynb"):
                # Skipping notebook tmp folders
                if ".ipynb_checkpoint" in root:
                    continue
                notebook = path.join(root, ffile)
                out_dir = doc_dir + root.replace(notebook_dir, '')
                if verbose:
                    log_lvl = 'DEBUG'
                else:
                    log_lvl = 'ERROR'
                cmd = "jupyter nbconvert --to html --log-level={log_lvl} "\
                      "--output-dir={out_dir} --output={output} {nb}"\
                       .format(log_lvl=log_lvl, out_dir=out_dir,
                               output="tmp.html", nb=notebook)
                print("   ~> Converting "+\
                        path.join(root.replace(notebook_dir, '')[1:], ffile))
                if verbose:
                    print(cmd)
                # Running conversion
                mes = Messages(size=10)
                tail, code = mes.run_cmd(cmd, bypass=False)

                if code != 0:
                    raise TelemacException(
                        'nbconvert failed\n {}'.format(tail))

                tmp_file = path.join(out_dir, 'tmp.html')
                out_file = path.join(out_dir, ffile[:-5] + "html")

                # Replacing .ipynb in content of file by .html
                with open(tmp_file, 'r') as f:
                    content = f.read()

                remove(tmp_file)
                with open(out_file, 'w') as f:
                    f.write(content.replace(".ipynb", ".html"))
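# Usage sketch (paths hypothetical): mirror the notebook tree as html pages
# under the documentation folder:
#
#     generate_notebook_html('documentation/notebooks', 'notebooks',
#                            verbose=False)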
Example #12
def main(module=None):
    """
    @brief Main function of the runcode.py module

    @param module (string): the name of the module to run (
      available modules are: telemac2d, telemac3d, artemis, tomawac,
      sisyphe, artemis, postel3d, ...)

    @return None
    """

# <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
# ~~~~ Reads config file ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    print('\n\nLoading Options and Configurations\n'+72*'~'+'\n')
    if module is None:
        parser = ArgumentParser(
            formatter_class=RawDescriptionHelpFormatter,
            description=('''\n
runcode is the execution launcher for all TELEMAC modules.\n
where module can be:\n
    mascaret     the 1D hydrodynamic / tracer / water quality solver
    telemac2d    the 2D hydrodynamic / tracer / water quality solver
    telemac3d    the 3D hydrodynamic / tracer / water quality solver
    artemis      the phase resolving wave solver
    tomawac      the 3rd generation wave transformation solver
    sisyphe      the sediment transport and geomorphological solver
    stbtel       a pre-processor for the modules
    postel3d     a post-processor for telemac3d
            '''),
            usage=' (--help for help)\n---------\n        =>  '
                  '%(prog)s module [options] casfile(s)\n---------',
            epilog=('''\nexamples:\n---------
1:     => runcode.py telemac2d -s t2d.cas
---------'''))
        parser.add_argument(
            "module",
            default=None,
            choices=['telemac2d', 'telemac3d', 'artemis', 'tomawac',
                     'stbtel', 'postel3d', 'sisyphe', 'partel', 'estel3d',
                     'mascaret'])
    else:
        parser = ArgumentParser(
            formatter_class=RawDescriptionHelpFormatter,
            description=('''\n
%(prog)s is one of the execution launcher for the TELEMAC system.
            '''),
            epilog=('''\nexamples:\n---------
1:     => %(prog)s -s t2d.cas
---------'''))
        parser.set_defaults(module=module)

    parser = add_runcode_argument(parser, module=module)
    # Arguments
    parser.add_argument("args", metavar='cas file(s)', nargs="+")

    options = parser.parse_args()

# <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
# ~~~~ Environment ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    update_config(options)

# <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
# ~~~~ banners ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    svn_banner(CFGS.get_root())

# <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
# ~~~~ Works for one configuration only ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    # Checking if symlink is available
    if options.use_link and not check_sym_link(options.use_link):
        raise TelemacException(\
                '\nThe symlink option is only '
                'available on Linux systems. '
                'Remove the option and try again')

# <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
# ~~~~ Reads command line arguments ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    code_name = options.module
    cas_files = options.args

# <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
# ~~~~ Works for only one configuration ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    # bypass errors and carries on
    options.bypass = False
    if options.split or options.merge or options.run:
        if options.w_dir == '':
            raise TelemacException(\
                    '\nPlease use option -w (--workdirectory)'
                    ' with either of the options '
                    '--split, --run or --merge\n')
    # parsing for proper naming
    CFGS.compute_execution_info()
    cfg = CFGS.configs[CFGS.cfgname]

    print('\n\nRunning your CAS file(s) for:\n'+'~'*72+'\n')
    CFGS.light_dump()
    if options.w_dir != '':
        print('     +> directory        ' + options.w_dir)
        options.tmpdirectory = False
    print('\n\n'+'~'*72+'\n')

# >>> Check whether the config has been compiled for the runcode
    if options.compileonly:
        cfg['REBUILD'] = 1
    if code_name not in cfg['MODULES']:
        raise TelemacException(\
                '\nThe code requested is not installed '
                'on this system : {}\n'.format(code_name))

# <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
# ~~~~ Reporting errors ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    xcpts = Messages()

# <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
# ~~~~ Run the Code from the CAS ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    for cas_file in cas_files:
        run_study(cas_file, code_name, options)

# <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
# ~~~~ Reporting errors ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    if xcpts.not_empty():
        raise TelemacException(\
                '\n\nHummm ... I could not complete '
                'my work.\n{}{}'.format('~'*72, xcpts.except_messages()))

# <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
# ~~~~ Jenkins' success message ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    print('\n\nMy work is done\n\n')
    sys.exit(0)
def mapdiff(a_1, a_2, notime=False, noname=False, relative=False):
    """
    Function used in the xml files to do the comparison between two files.
    @brief Create a new set of values containing the diff of the values
    of the two arguments

    @param a_1 A Values object containing the keys {support,names,values,time}
    @param a_2 A Values object containing the keys {support,names,values,time}
    @param notime (optional) allow comparison of result with different time
    @param noname (optional) Not checking that names are the same
    @param relative (boolean) If true relative error is computed otherwise
                              absolute one


    @return A 4-uple (time,name_of_the_variables,support,values)
    """
    # Checking they have the same shape, should be (ntime, nvar, npoin)
    diff = zeros(a_2.values.shape, dtype=a_2.values.dtype)
    # With ntime = 1
    if a_1.values.shape != a_2.values.shape:
        raise TelemacException(\
              "Error in files the two array do not have the same shape"\
              " (ntimestep, nvar, npoin)\n"+
              "a_1 shape: " + repr(a_1.values.shape) + "\n"
              "a_2 shape: " + repr(a_2.values.shape))
    # Shape of the values should be (ntime,nvar,npoin)
    _, nvar, npoin = a_2.values.shape
    # Checking the variables are the same in each file unless noname is True
    if not noname:
        for ivar in range(nvar):
            if a_1.names[ivar][0:15] != a_2.names[ivar][0:15]:
                raise TelemacException(\
                    "Could not found '{}' from a_1 in a_2:\n{}".format( \
                    a_1.names[ivar][0:16], a_2.names))
    # Checking if we have the same time step
    if not notime:
        if abs(a_1.time[0] - a_2.time[0]) > 1e-6:
            raise TelemacException(\
                'The times of the two files are different\n' + \
                str(a_1.time[0]) + ' for a_1\n' + \
                str(a_2.time[0]) + ' for a_2')
    # Making a_1-a_2 for each variable and each point
    # TODO: Optimize using numpy computation (remove loops)
    if relative:
        for ivar in range(nvar):
            for i in range(npoin):
                ldiff = a_2.values[0][ivar][i] \
                           - a_1.values[0][ivar][i]
                if abs(ldiff) > 1e-42:
                    a_max = max(abs(a_2.values[0][ivar][i]), \
                                abs(a_1.values[0][ivar][i]))
                    diff[0][ivar][i] = ldiff / a_max
                else:
                    diff[0][ivar][i] = 0.0
    else:
        for ivar in range(nvar):
            for i in range(npoin):
                diff[0][ivar][i] = a_2.values[0][ivar][i] \
                                  - a_1.values[0][ivar][i]

    return a_2.time, a_2.names, a_2.support, diff
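# A standalone numpy sketch of the optimisation suggested by the TODO above:
# the double loop over (ivar, i) can be replaced by whole-array operations.
# This is an illustration under the same conventions, not the project's code.
import numpy as np

def vectorised_diff(v_1, v_2, relative=False):
    """Return v_2 - v_1; if relative, scale by max(|v_1|, |v_2|) and
    zero out differences below the 1e-42 threshold used in mapdiff."""
    diff = v_2 - v_1
    if not relative:
        return diff
    a_max = np.maximum(np.abs(v_1), np.abs(v_2))
    out = np.zeros_like(diff, dtype=float)
    mask = np.abs(diff) > 1e-42
    # divide only where the difference is significant, leave zeros elsewhere
    np.divide(diff, a_max, out=out, where=mask)
    return out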
Example #14
    def _parse_cas(self):
        """
        Parse the steering file and identify (key, value)
        And updates self.values according to the pairs identified
        """
        lines = []
        # ~~ clean ending empty lines
        with open(self.file_name, 'r') as f:
            for line in f.readlines():
                # Remove trailing spaces (left and right) and \n
                line = line.strip(' ').rstrip('\n')
                # not adding empty lines
                if line != '':
                    # Skipping &key (&ETA, &FIN...)
                    if line[0] == '&':
                        continue
                    lines.append(line)

        # ~~ clean comments
        core = []
        for line in lines:
            line = line.replace('"""', "'''")\
                       .replace('"', "'")\
                       .replace("''", '"')
            proc = re.match(KEY_COMMENT, line + '/')
            ini_line = line
            line = proc.group('before').strip() + ' '
            proc = re.match(EMPTY_LINE, line)
            if not proc:
                core.append(line)
            else:
                # Save useful comments (i.e. with text in them)
                useless = True
                for char in ini_line:
                    if char not in ['/', '*', '-', ' ', '+']:
                        useless = False
                        break
                if not useless:
                    self.comments.append(ini_line)

        # Creates a one line of the cleaned up steering
        cas_stream = (' '.join(core))
        # ~~ Matching keyword -> values
        while cas_stream != '':
            # ~~ Matching keyword
            proc = re.match(KEY_EQUALS, cas_stream)
            if not proc:
                raise TelemacException(' Error while parsing steering file {} '
                                       'incorrect line:\n{}'.format(
                                           self.file_name, cas_stream[:100]))
            keyword = proc.group('key').strip()
            cas_stream = proc.group('after')  # still hold the separator
            # ~~ Matching value
            proc = re.match(VAL_EQUALS, cas_stream)
            if not proc:
                raise TelemacException('No value to keyword ' + keyword)
            val = []
            # The value can be on multiple lines
            while proc:
                if proc.group('val') == '"':
                    val.append('')
                else:
                    val.append(proc.group('val').replace("'", ''))
                cas_stream = proc.group('after')  # still hold the separator
                proc = re.match(VAL_EQUALS, cas_stream)
            # Updating the value with the last one read
            self.values[keyword] = val
Example #15
def main():
    """ Main function of manip_cas.py """
    # <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
    # ~~ Reads config file ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    print('\n\nLoading Options and Configurations\n' + 72 * '~' + '\n')
    parser = argparse.ArgumentParser(description='Translate a keyword')
    parser = add_config_argument(parser)
    parser.add_argument("module",
                        choices=[
                            'postel3d', 'telemac2d', 'telemac3d', 'tomawac',
                            'artemis', 'sisyphe', 'waqtel', 'khione', 'stbtel'
                        ],
                        help="Name of the module for which to translate")
    parser.add_argument(
        "-t",
        "--translate",
        action="store_true",
        dest="translate",
        default=False,
        help="Generate a french and english version of the steering file "
        "(cas_file suffixed with _fr and _gb)")

    parser.add_argument(
        "-s",
        "--sort",
        action="store_true",
        dest="sort",
        default=False,
        help="Rewrites the steering file using rubriques to sort the keywords "
        "cas_file suffixed with _sorted")

    parser.add_argument("--keep-comments",
                        action="store_true",
                        dest="keep_comments",
                        default=False,
                        help="When sorting will append all original comments "
                        "at the end of the file")

    parser.add_argument("cas_file", help="Name of the steering file to read")

    args = parser.parse_args()

    # <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
    # ~~~~ Environment ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    update_config(args)
    cfg = CFGS.configs[CFGS.cfgname]
    CFGS.compute_execution_info()

    # <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
    # Searching for the dictionary associated with the steering case
    dico_file = path.join(cfg['MODULES'][args.module]['path'],
                          args.module + '.dico')
    if not path.exists(dico_file):
        raise TelemacException(\
            'Could not find the dictionary file: {}'.format(dico_file))
    cas = TelemacCas(args.cas_file, dico_file, check_files=False)

    check_cas(cas)

    if args.translate:
        translate(cas)
    if args.sort:
        sort(cas, args.keep_comments)

    print('\n\nMy work is done\n\n')
    sys.exit(0)
Example #16
    def __new__(cls, *args, **kwargs):
        if cls._instanciated:
            raise TelemacException("a Telemac2d instance already exists")
        instance = ApiModule.__new__(cls)
        cls._instanciated = True
        return instance
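# Sketch of the singleton guard in action; the constructor argument is
# hypothetical and the class is assumed to be the Telemac2d API class the
# exception message refers to:
#
#     t2d = Telemac2d('t2d.cas')
#     t2d2 = Telemac2d('t2d.cas')  # raises TelemacException:
#                                  # "a Telemac2d instance already exists"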
def from_lat_long(longitude, latitude, zone):
    """
    Convert longitude / latitude in degrees (numpy arrays) into UTM
    easting / northing; if zone is 0 the zone number is computed from
    the mid-point of the coordinates

    @param longitude (np.array) Longitudes in degrees
    @param latitude (np.array) Latitudes in degrees
    @param zone (int) UTM zone number (0 to let the function choose)

    @returns (np.array, np.array, int) easting, northing, zone
    """
    # ~~> latitude
    minlat = np.min(latitude)
    maxlat = np.max(latitude)
    if minlat < -84 or maxlat > 84:
        raise TelemacException(\
                '... Latitude out of range '
                '(must be between 84 deg S and 84 deg N)')
    lat_rad = np.radians(latitude)
    lat_sin = np.sin(lat_rad)
    lat_cos = np.cos(lat_rad)
    lat_tan = np.divide(lat_sin, lat_cos)
    lat_tan2 = np.power(lat_tan, 2)
    lat_tan4 = np.power(lat_tan, 4)

    # ~~> longitude
    minlon = np.min(longitude)
    maxlon = np.max(longitude)
    if minlon < -180 or maxlon > 180:
        raise TelemacException(\
                '... Longitude out of range (must be between 180 deg W '
                'and 180 deg E)')
    lon_rad = np.radians(longitude)

    # ~~> zone number for the mid point

    midlat = (maxlat + minlat) / 2.0
    if zone == 0:
        midlon = (maxlon + minlon) / 2.0
        if 56 <= midlat <= 64 and 3 <= midlon <= 12:
            zone = 32
        elif 72 <= midlat <= 84 and midlon >= 0:
            if midlon <= 9:
                zone = 31
            elif midlon <= 21:
                zone = 33
            elif midlon <= 33:
                zone = 35
            elif midlon <= 42:
                zone = 37
        else:
            zone = int((midlon + 180) / 6) + 1

    # ~~> central longitude
    centre = (zone - 1) * 6 - 180 + 3
    centre = math.radians(centre)

    n = R * np.power((1 - E * np.power(lat_sin, 2)), -0.5)
    c_1 = E_P2 * np.power(lat_cos, 2)

    a_1 = np.multiply(lat_cos, (lon_rad - centre))
    a_2 = np.power(a_1, 2)
    a_3 = np.power(a_1, 3)
    a_4 = np.power(a_1, 4)
    a_5 = np.power(a_1, 5)
    a_6 = np.power(a_1, 6)

    m = R*(M1*lat_rad - M2*np.sin(2*lat_rad) + M3*np.sin(4*lat_rad) - \
             M4*np.sin(6*lat_rad))

    easting = 500000 + K0 * \
        np.multiply(n, (a_1 + a_3/6 * (1-lat_tan2+c_1) + \
                             a_5/120 * (5-18*lat_tan2+lat_tan4+72*c_1-58*E_P2)))

    northing = K0 * (m + np.multiply(np.multiply(n, lat_tan), ( \
        a_2/2 + a_4/24 * (5-lat_tan2+9*c_1+4*np.power(c_1, 2)) + \
        a_6/720*(61-58*lat_tan2+lat_tan4+600*c_1-330*E_P2))))

    if midlat < 0:
        northing = northing + 10000000

    return easting, northing, zone
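
# Hedged usage sketch for from_lat_long (assumes the module-level constants
# R, E, E_P2, K0 and M1..M4 are defined, as used in the body above; the
# coordinate values are illustrative only):
# lon = np.array([3.05, 3.10])
# lat = np.array([50.60, 50.65])
# easting, northing, zone = from_lat_long(lon, lat, zone=0)
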
Example #18
    def compute_compilation_info(self, rescan=False, bypass=False):
        """
        Extract all the information required for
        the Compilation of TELEMAC
        Requires: root,
        Optional: mods_, incs_, libs_, ... and options

        @param cfgname (string) Name of the configuration
        @param rescan(boolean) check if it must rescan
        @param bypass(boolean) continue with a raise exception

        """
        cfg = self.configs[self.cfgname]

        tbd = self.compute_modules_info(rescan, bypass, add_odd=True)

        # ~~ Compilation options ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
        #
        #  Get libs_all: ... libs_artemis: ... libs_passive: ...
        #  mods_all: ... etc.
        #  for every module in the list of modules to account for
        #  specific external includes for all or each module
        for mod in cfg['MODULES']:
            for ext in ['mods', 'incs', 'libs']:
                externals = add_externals(cfg, ext, mod)\
                            .replace('<root>', cfg['root'])
                cfg['MODULES'][mod][ext] = externals
        # Get cmd_obj: ... cmd_lib: ... cmd_exe: ...
        # the compiler dependent command lines to create obj, lib and exe
        for mod in cfg['MODULES']:
            for ext in ['obj', 'lib', 'exe', 'pyf', 'pyd']:
                externals = get_externals(cfg, ext, mod)\
                         .replace('<root>', cfg['root'])
                cfg['MODULES'][mod]['x'+ext] = externals

        cfg['COMPILER'] = {}
        #  Get modules: user list of module
        #  in which 'system' means all existing modules,
        #  and in which 'update' means a rebuild of the lib and exe
        #  and in which 'clean' means a rebuild of the obj, lib and exe
        #  and Get options: for the switches such as parallel, openmi, mumps,
        #  etc.
        get, tbr = parse_user_modules(cfg, cfg['MODULES'])
        get = get.split()
        #  Add extra modules for special effects as priority items (insert(0,))
        #  one of which can be 'passive', for instance
        for mod in cfg['ADDONES']:
            if mod not in get:
                get.insert(0, mod)
        #  Delayed removal of the relevant CMDF - exception made for mascaret
        for mod in tbd:
            if mod in get and mod != 'mascaret':
                for fle in tbd[mod]:
                    remove(fle)

        cfg['COMPILER']['MODULES'] = get
        cfg['COMPILER']['REBUILD'] = tbr
        for mod in get:
            if mod not in cfg['MODULES']:
                raise TelemacException(\
                        '\nThe following module does not exist '
                        '{} \n'.format(mod))

        self.compute_zip_info()

        self.compute_system_info()

        self.compute_trace_info()
def generate_bnd(cli_file, geo_file, slf_file, bnd_file, varnames, varunits):
    """
    @param cli_file
    @param geo_file
    @param slf_file
    @param bnd_file
    @param varnames
    @param varunits
    """

    # <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
    # ~~~~ cli+slf new mesh ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    if not path.exists(cli_file):
        raise TelemacException(\
             '... the provided cli_file does not seem to exist:'
             ' {}\n\n'.format(cli_file))
    if not path.exists(geo_file):
        raise TelemacException(\
            '... the provided geo_file does not seem to exist: '
            '{}\n\n'.format(geo_file))

    if len(varnames) != len(varunits):
        raise TelemacException(\
            'Not the same number of variables and units\n'
            'varnames: {}\nvarunits: {}\n\n'.format(varnames, varunits))

    # Read the new CLI file to get boundary node numbers
    print('   +> getting hold of the Conlim file and of its liquid boundaries')
    cli = Conlim(cli_file)
    # Keeping only open boundary nodes
    bor = np.extract(cli.bor['lih'] != 2, cli.bor['n'])

    # Find corresponding (x,y) in corresponding new mesh
    print('   +> getting hold of the GEO file and of its bathymetry')
    geo = Selafin(geo_file)
    xys = np.vstack((geo.meshx[bor - 1], geo.meshy[bor - 1])).T
    _ = geo.get_variables_at(0,\
                  subset_variables_slf("BOTTOM: ", geo.varnames)[0])[0]

    # <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
    # ~~~~ slf existing res ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    if not path.exists(slf_file):
        raise TelemacException(\
               '... the provided slf_file does not seem to exist: '
               '{}\n\n'.format(slf_file))
    slf = Selafin(slf_file)
    slf.set_kd_tree()
    slf.set_mpl_tri()

    print('   +> support extraction')
    # Extract triangles and weights in 2D
    support2d = []
    ibar = 0
    pbar = ProgressBar(maxval=len(xys)).start()
    for xyi in xys:
        support2d.append(
            xys_locate_mesh(xyi, slf.ikle2, slf.meshx, slf.meshy, slf.tree,
                            slf.neighbours))
        ibar += 1
        pbar.update(ibar)
    pbar.finish()
    # Extract support in 3D
    support3d = list(zip(support2d, len(xys) * [range(slf.nplan)]))

    # <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
    # ~~~~ writes BND header ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

    bnd = Selafin('')
    bnd.fole = {}
    bnd.fole.update({'hook': open(bnd_file, 'wb')})
    bnd.fole.update({'name': bnd_file})
    bnd.fole.update({'endian': ">"})  # big endian
    bnd.fole.update({'float': ('f', 4)})  # single precision

    # Meta data and variable names
    bnd.title = ''
    bnd.nbv1 = len(varnames)
    # /!\ ELEVATION has to be the first variable
    # (for possible vertical re-interpolation within TELEMAC)

    bnd.varnames = []
    bnd.varunits = []
    for var, unit in zip(varnames, varunits):
        new_var = var + (16 - len(var)) * " "
        new_unit = unit + (16 - len(unit)) * " "
        bnd.varnames.append(new_var)
        bnd.varunits.append(new_unit)

    bnd.nvar = bnd.nbv1
    bnd.varindex = range(bnd.nvar)

    # Sizes and mesh connectivity
    bnd.nplan = slf.nplan
    # Number of nodes per boundary element  (ndp2 in 2D and ndp3 in 3D)
    bnd.ndp2 = 2
    bnd.ndp3 = 4
    bnd.npoin2 = len(bor)
    bnd.npoin3 = bnd.npoin2 * slf.nplan
    bnd.iparam = [0, 0, 0, 0, 0, 0, bnd.nplan, 0, 0, 1]
    bnd.ipob2 = bor  # /!\ note that ipobo keeps the original numbering
    print('   +> masking and setting connectivity')
    # Set the array that only includes elements of geo.ikle2
    # with at least two nodes in bor
    array_1d = np.in1d(geo.ikle2, np.sort(bor - 1))
    mask = geo.ikle2[np.where(
        np.sum(array_1d.reshape(geo.nelem2, geo.ndp2), axis=1) == 2)]
    # this ikle2 keeps the original numbering
    ikle2 = np.ravel(mask)[np.in1d(mask,
                                   np.sort(bor - 1))].reshape(len(mask), 2)
    # ~~> re-numbering ikle2 as a local connectivity matrix
    knolg, _ = np.unique(np.ravel(ikle2), return_index=True)
    knogl = dict(zip(knolg, range(len(knolg))))
    bnd.ikle2 = -np.ones_like(ikle2, dtype=int)  # np.int is removed in recent NumPy
    for k in range(len(ikle2)):
        # /!\ bnd.ikle2 has a local numbering, fit to the boundary elements
        bnd.ikle2[k] = [knogl[ikle2[k][0]], knogl[ikle2[k][1]]]
    # Last few numbers
    bnd.nelem2 = len(bnd.ikle2)
    if slf.nplan > 1:
        bnd.nelem3 = bnd.nelem2 * (slf.nplan - 1)
    else:
        bnd.nelem3 = bnd.nelem2
        bnd.ndp3 = bnd.ndp2
    # 3D structures
    if slf.nplan > 1:
        bnd.ipob3 = np.ravel(np.add(np.repeat(bnd.ipob2, slf.nplan)\
                                      .reshape((bnd.npoin2, slf.nplan)),
                                    bnd.npoin2*np.arange(slf.nplan)).T)
        bnd.ikle3 = \
            np.repeat(bnd.npoin2*np.arange(slf.nplan-1),
                      bnd.nelem2*bnd.ndp3)\
              .reshape((bnd.nelem2*(slf.nplan-1), bnd.ndp3)) + \
            np.tile(np.add(np.tile(bnd.ikle2, 2),
                           np.repeat(bnd.npoin2*np.arange(2), bnd.ndp2)),
                    (slf.nplan-1, 1))
    else:
        bnd.ipob3 = bnd.ipob2
        bnd.ikle3 = bnd.ikle2
    # Mesh coordinates
    bnd.meshx = geo.meshx[bor - 1]
    bnd.meshy = geo.meshy[bor - 1]

    print('   +> writing header')
    # Write header
    bnd.append_header_slf()

    # <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
    # ~~~~ writes BND core ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

    print('   +> setting variables')
    # TIME and DATE extraction
    bnd.datetime = slf.datetime
    bnd.tags['times'] = slf.tags['times']
    # VARIABLE extraction
    list_var = varnames[0] + ": "
    for var in varnames[1:]:
        list_var += ";" + var + ": "

    vrs = subset_variables_slf(list_var, slf.varnames)

    # Read / Write data, one time step at a time to support large files
    print('   +> reading / writing variables')
    pbar = ProgressBar(maxval=len(slf.tags['times'])).start()
    zeros = np.zeros((bnd.npoin3, 1), dtype=float)
    for itime in range(len(slf.tags['times'])):
        data = get_value_history_slf(slf.file, slf.tags, [itime], support3d,
                                     slf.nvar, slf.npoin3, slf.nplan, vrs)
        data = np.reshape(
            np.transpose(
                np.reshape(np.ravel(data), (bnd.nvar, bnd.npoin2, bnd.nplan)),
                (0, 2, 1)), (bnd.nvar, bnd.npoin3))
        bnd.append_core_time_slf(itime)
        bnd.append_core_vars_slf(data)
        pbar.update(itime)
    pbar.finish()

    # Close bnd_file
    bnd.fole['hook'].close()
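
# Hedged usage sketch for generate_bnd (file names and variable lists are
# illustrative only):
# generate_bnd('open-bound.cli', 'geometry.slf', 'outer-result.slf',
#              'out-bound.slf', ['ELEVATION Z', 'VELOCITY U'], ['M', 'M/S'])
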
Example #20
    def compute_vnv_info(self):
        """
        Extract all the information required for the validation
        of the relevant modules for each configuration
        The principal assumption is that the validation cases are
        either under:
          + val_root\\*
          + teldir\\examples\\module\\val_root\\*
        If the 'val_root' key is not in the config, the default
        path is assumed to be based on the second option
        """
        cfg = self.configs[self.cfgname]

        self.compute_modules_info()

        # Get libs_all: ... libs_artemis: ... mods_all: ... etc.
        # for every module in the list of modules to account for
        # specific external includes for all or each module
        for mod in cfg['MODULES']:
            cfg['MODULES'][mod].update(\
                {'mods': add_externals(cfg, 'mods', mod)\
                         .replace('<root>', cfg['root'])})
            cfg['MODULES'][mod].update(\
                {'incs': add_externals(cfg, 'incs', mod)\
                         .replace('<root>', cfg['root'])})
            cfg['MODULES'][mod].update(\
                {'libs': add_externals(cfg, 'libs', mod)\
                         .replace('<root>', cfg['root'])})

        cfg['VALIDATION'] = {}
        # Get validation: user list of modules and their associated directories
        # in which 'system' means all existing modules,
        # and in which 'update' means a continuation,
        #     ignoring previously completed runs
        # and in which 'clean' means a re-run of all validation tests
        if 'val_root' not in cfg:
            val_root = path.realpath(path.join(cfg['root'], 'examples'))
            if not path.isdir(val_root):
                raise TelemacException(\
                 '\nNot able to find your validation set from the path: {} \n\n'
                 ' ... check the val_root key in your configuration file'
                 ''.format(val_root))
        else:
            val_root = cfg['val_root'].replace('<root>', cfg['root'])
        cfg['val_root'] = val_root

        _, examples, _ = next(walk(val_root))
        get, tbr = parse_user_modules(cfg, cfg['MODULES'])
        cfg['REBUILD'] = tbr
        # Removing special modules if we are not in 'system' mode or if they
        # are not explicitly given
        specials = ['python3']
        # Removing python2 examples
        if 'python27' in examples:
            examples.remove('python27')

        for mod in specials:
            if not ("system" in cfg['modules'] or mod in cfg['modules']):
                examples.remove(mod)

        # Removing modules that are not in the configuration
        for mod in list(examples):
            if mod not in get and mod in cfg['MODULES']:
                examples.remove(mod)
        # Exception for mascaret as it is not in cfg_telemac['MODULES']
        # Because it does not have a mascaret.dico file
        if 'mascaret' not in get:
            if 'mascaret' in examples:
                examples.remove('mascaret')
        for mod in examples:
            val_dir = path.join(val_root, mod)
            val_mod = get_files_validation_telemac(val_dir)
            if val_mod != {}:
                cfg['VALIDATION'].update(\
                     {mod: {'path': path.realpath(val_dir)}})
                cfg['VALIDATION'][mod].update(val_mod)

        self.compute_partel_info()

        self.compute_mpi_info()

        self.compute_hpc_info()

        self.compute_zip_info()

        self.compute_system_info()

        self.compute_trace_info()
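
# Minimal stand-alone sketch of the val_root defaulting rule described in the
# docstring above (cfg here is a hypothetical plain dict):
from os import path

def _resolve_val_root(cfg):
    """Default val_root to <root>/examples when the key is absent."""
    if 'val_root' not in cfg:
        return path.realpath(path.join(cfg['root'], 'examples'))
    return cfg['val_root'].replace('<root>', cfg['root'])

# _resolve_val_root({'root': '/opt/telemac'}) -> '/opt/telemac/examples'
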
Example #21
def main():
    """
    Options for each code name
        scan [--core] *.slf [*.slf]
        + '--core': print statistics on all variables, for each time step,
                      within the core of all SELAFIN files present in args
        chop [--from] [--step] [--stop] in.slf out.slf
        + '--from': first frame included in out.slf
        + '--step': step used to extract the appropriate frame for out.slf
        + '--stop': last frame included in out.slf (unless step jumps over it)
          frame numbers (from and stop) being numbered from 0
        + '--vars': list those variables that are being extracted (all if empty)
        + '--replace': replace the input file by the output file, in which case
          multiple input files can be used
        alter [--title] [--date] [--time] ...
    """

    # <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
    # ~~~~ Reads config file ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    print('\n\nInterpreting command line options\n' + '~' * 72 + '\n')

    parser = ArgumentParser()
    subparser = parser.add_subparsers(\
            help='run_selafin commands to do', dest='command')

    subparser = chop_parser(subparser)
    subparser = scan_parser(subparser)
    subparser = spec_parser(subparser)
    subparser = alter_parser(subparser)
    subparser = merge_parser(subparser)
    subparser = diff_parser(subparser)
    subparser = calcs_parser(subparser, 'calcs', '???')
    subparser = calcs_parser(subparser, 'crunch', '???')
    subparser = calcs_parser(subparser, 'transf', '???')
    subparser = sample_parser(subparser)
    subparser = subdivide_parser(subparser)
    subparser = tesselate_parser(subparser)

    options = parser.parse_args()

    # <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
    # ~~~~ Reads code name ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

    if options.command == 'scan':
        scan(options)
    elif options.command == 'spec':
        spec(options)
    elif options.command == 'chop':
        chop(options)
    elif options.command == 'alter':
        alter(options)
    elif options.command == 'merge':
        merge(options)
    elif options.command == 'diff':
        diff(options)
    elif options.command == 'sample':
        sample(options)
    elif options.command in ['calcs', 'crunch', 'transf']:
        calcs(options, options.command)
    elif options.command == 'subdivide':
        subdivide(options)
    elif options.command == 'tessellate':
        tesselate(options)
    else:
        raise TelemacException(\
                '\nDo not know what to do with '
                'this code name: {}'.format(options.command))


# <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
# ~~~~ Jenkins' success message ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    print('\n\nMy work is done\n\n')

    sys.exit(0)
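
# The if/elif chain above could also be expressed as a dispatch dict; a
# minimal self-contained sketch of that alternative (handlers hypothetical):
def _dispatch(command, handlers):
    """Call the handler registered for command or raise a clear error."""
    if command not in handlers:
        raise ValueError('Do not know what to do with '
                         'this code name: {}'.format(command))
    return handlers[command]()

# _dispatch('scan', {'scan': lambda: print('scanning')})
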
Example #22
    def parse_cfg_file(self, cfg_file, name, root_dir, python_dir):
        """
        Get the name of the config file from command line arguments
        and store its rough content in a dict -- returns the dict
        and sets the global CONFIGS

        @param cfg_file (string) the name of the configuration file
        @param name (string) configuration name
        @param root_dir (string) Path to root of sources
        @param python_dir (string) Path to root of Python scripts

        @return config_dict (dictionary) information for the configuration
        """
        # Checking that the file exists
        if not path.exists(cfg_file):
            raise TelemacException('Could not find {}'.format(cfg_file))

        self.cfg_file = cfg_file
        self.cfgname = name
        # ~~ Parse CFG File ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
        self.read_cfg()

        # ~~ Replacing user keys throughout ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
        key_sqbrack = re.compile(r'(?P<brack>\[[\w_.-~=+]*?\])')  # ,re.I)
        for cfgname in self.configs:
            # ~~> making sure cfgname also includes all keys from general
            cfg = self.configs[cfgname]
            for genkey in self.general:
                if genkey not in cfg:
                    cfg.update({genkey: self.general[genkey]})
            # ~~> listing all [key] up for replacement
            # (avoiding recursive referencing)
            key_records = []
            for cfgkey in cfg:
                for k in re.findall(key_sqbrack, cfg[cfgkey]):
                    key_records.append(k)
            # ~~> replacing [key] by its value, if so defined
            for k in key_records:
                key = k.strip('[]')
                for cfgkey in cfg:
                    if key in cfg:
                        cfg[cfgkey] = cfg[cfgkey].replace(k, cfg[key])
            # ~~> defaulting [key] to the environment, if so defined
            for cfgkey in cfg:
                for k in re.findall(key_sqbrack, cfg[cfgkey]):
                    key = k.strip('[]')
                    if key in environ:
                        cfg[cfgkey] = \
                           cfg[cfgkey]\
                           .replace(k, environ[key])
                    else:
                        print('... Could not find your special key '+k+
                              ' in key '
                              + cfgkey + ' of configuration ' + cfgname)
            # Setting root key if not defined
            if 'root' not in cfg:
                cfg['root'] = path.normpath(root_dir)
            if cfg['root'] == '':
                cfg['root'] = path.normpath(root_dir)
            else:
                cfg['root'] = path.normpath(cfg['root'])
            # Setting pytel key if not defined
            if 'pytel' not in cfg:
                cfg['pytel'] = python_dir
            if cfg['pytel'] == '':
                cfg['pytel'] = python_dir
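
# Minimal stand-alone sketch of the [key] substitution mechanics used in
# parse_cfg_file above (the cfg dict here is hypothetical):
import re

def _expand_brackets(cfg):
    """Replace every [key] occurrence in cfg values by cfg[key]."""
    key_sqbrack = re.compile(r'(?P<brack>\[[\w_.-~=+]*?\])')
    for cfgkey in cfg:
        for k in re.findall(key_sqbrack, cfg[cfgkey]):
            key = k.strip('[]')
            if key in cfg:
                cfg[cfgkey] = cfg[cfgkey].replace(k, cfg[key])
    return cfg

# _expand_brackets({'root': '/opt/telemac', 'bin': '[root]/bin'})
# -> {'root': '/opt/telemac', 'bin': '/opt/telemac/bin'}
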
Example #23
def from_latlon(latitude,
                longitude,
                force_zone_number=None,
                force_zone_letter=None):
    """
    This function converts latitude and longitude to UTM coordinates

    @param latitude (np.array)
             Latitude between 80 deg S and 84 deg N, e.g. (-80.0 to 84.0)

    @param longitude (np.array)
             Longitude between 180 deg W and 180 deg E, e.g. (-180.0 to 180.0).

    @param force_zone_number (int)
             Zone Number is represented with global map numbers of an UTM Zone
             Numbers Map. You may force conversion including one UTM Zone
             Number.
             More information see utmzones [1]_
    @param force_zone_letter (str)
             Zone letter to be forced

       .. _[1]: http://www.jaworski.ca/utmzones.htm

    @returns (np.array, np.array, int) easting, northing, zone
    """
    if not in_bounds(latitude, -80.0, 84.0):
        raise TelemacException(
            'latitude out of range (must be between 80 deg S and 84 deg N)')
    if not in_bounds(longitude, -180.0, 180.0):
        raise TelemacException(
            'longitude out of range (must be between 180 deg W and 180 deg E)')
    if force_zone_number is not None:
        check_valid_zone(force_zone_number, force_zone_letter)

    lat_rad = np.radians(latitude)
    lat_sin = np.sin(lat_rad)
    lat_cos = np.cos(lat_rad)

    lat_tan = lat_sin / lat_cos
    lat_tan2 = lat_tan * lat_tan
    lat_tan4 = lat_tan2 * lat_tan2

    if force_zone_number is None:
        zone_number = latlon_to_zone_number(latitude, longitude)
    else:
        zone_number = force_zone_number

    if force_zone_letter is None:
        zone_letter = latitude_to_zone_letter(latitude)
    else:
        zone_letter = force_zone_letter

    lon_rad = np.radians(longitude)
    central_lon = zone_number_to_central_longitude(zone_number)
    central_lon_rad = np.radians(central_lon)

    n = R / np.sqrt(1 - E * lat_sin**2)
    c_0 = E_P2 * lat_cos**2

    a_0 = lat_cos * (lon_rad - central_lon_rad)
    a_2 = a_0 * a_0
    a_3 = a_2 * a_0
    a_4 = a_3 * a_0
    a_5 = a_4 * a_0
    a_6 = a_5 * a_0

    m = R * (M1 * lat_rad - M2 * np.sin(2 * lat_rad) +
             M3 * np.sin(4 * lat_rad) - M4 * np.sin(6 * lat_rad))

    easting = K0 * n * (
        a_0 + a_3 / 6 * (1 - lat_tan2 + c_0) + a_5 / 120 *
        (5 - 18 * lat_tan2 + lat_tan4 + 72 * c_0 - 58 * E_P2)) + 500000

    northing = K0 * (
        m + n * lat_tan *
        (a_2 / 2 + a_4 / 24 *
         (5 - lat_tan2 + 9 * c_0 + 4 * c_0**2) + a_6 / 720 *
         (61 - 58 * lat_tan2 + lat_tan4 + 600 * c_0 - 330 * E_P2)))

    if mixed_signs(latitude):
        raise ValueError("latitudes must all have the same sign")

    if negative(latitude):
        northing += 10000000

    return easting, northing, zone_number, zone_letter
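
# Hedged usage sketch for from_latlon (assumes the module-level constants and
# helpers used above - R, E, E_P2, K0, M1..M4, in_bounds, etc. - are defined;
# the coordinate values are illustrative only):
# east, north, zone_number, zone_letter = from_latlon(np.array([50.6]),
#                                                     np.array([3.1]))
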
def main():
    """
    Main function of diffSELAFIN
    """
    # <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
    # ~~ Reads config file ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    print('\n\nLoading Options and Configurations\n' + 72 * '~' + '\n')
    parser = ArgumentParser(\
        formatter_class=RawDescriptionHelpFormatter,
        description=('''\n
Reporting on differences between two SELAFIN files
        '''),
        usage=' (--help for help)\n---------\n      =>  '\
                '%(prog)s [options] file1.slf file2.slf\n---------')
    # ~~> Uselessly set to True as default ... may change in the future
    # ~~> The real switches
    parser.add_argument(\
        "--head", action="store_true",
        dest="head", default=False,
        help="Will print the header differences between the two SELAFIN files")
    parser.add_argument(\
        "--core", action="store_true",
        dest="core", default=False,
        help="Will print the statistical differences between the core "
             "values of the two SELAFIN files")
    parser.add_argument(\
        "--scan", action="store_true",
        dest="scan", default=False,
        help="Will print an individual summary for each file")
    parser.add_argument(\
        "-v", "--vars",
        dest="xvars", default=None,
        help= \
        "specify which variables should be differentiated (':'-delimited)")
    parser.add_argument(\
        "-f", "--from",
        dest="tfrom", default="1",
        help="specify the first frame included in the differentiation")
    parser.add_argument(\
        "-s", "--stop",
        dest="tstop", default="-1",
        help="specify the last frame included (negative from the end) "\
              "in the differentiation")
    parser.add_argument(\
        "-diff", "--step",
        dest="tstep", default="1",
        help="specify the step for the extraction of frames for "\
              "the differentiation")
    parser.add_argument(\
        "-e", "--epsilon",
        dest="epsilon", default="0",
        help="specify the threshold for which values are assumed the same")
    parser.add_argument(\
        "-b", "--bypass", action="store_true",
        dest="bypass", default=False,
        help="Will bypass certain mismatches between files")
    parser.add_argument(\
        "args", metavar='file1,file2',
        default='', nargs=2,
        help="operation: ( files1 - file2 )")
    options = parser.parse_args()

    # <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
    # ~~~~ Double checks ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

    slf_file1 = options.args[0]
    if not path.exists(slf_file1):
        raise TelemacException(
            '\nCould not find the file named: {}'.format(slf_file1))
    slf_file2 = options.args[1]
    if not path.exists(slf_file2):
        raise TelemacException(
            '\nCould not find the file named: {}'.format(slf_file2))

# <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
# ~~~~ Initial scan ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

    vrbls = options.xvars
    if options.xvars is not None:
        vrbls = clean_quotes(options.xvars.replace('_', ' '))
    times = (int(options.tfrom), int(options.tstep), int(options.tstop))
    slf1 = ScanSelafin(slf_file1, times=times, vrs=vrbls)
    slf2 = ScanSelafin(slf_file2, times=times, vrs=vrbls)

    if options.scan:
        print('\n\nFirst file: ' + slf_file1 + '\n' + 72 * '~' + '\n')
        slf1.print_header()
        slf1.print_time_summary()
        print('\n\nSecond file: ' + slf_file2 + '\n' + 72 * '~' + '\n')
        slf2.print_header()
        slf2.print_time_summary()

    comparable = True

    # <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
    # ~~~~ Header differences ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

    if options.head:
        print('\n\nHeader differences: \n' + 72 * '~' + '\n')

        # ~~> File formats
        if slf1.slf.file['endian'] != slf2.slf.file['endian']:
            print('\n  <> File ENDIANs:\n')
            if slf1.slf.file['endian'] == ">":
                print('     + ' + slf_file1 + ' is BIG ENDIAN')
            else:
                print('     + ' + slf_file1 + ' is LITTLE ENDIAN')
            if slf2.slf.file['endian'] == ">":
                print('     + ' + slf_file2 + ' is BIG ENDIAN')
            else:
                print('     + ' + slf_file2 + ' is LITTLE ENDIAN')
        if slf1.slf.file['float'] != slf2.slf.file['float']:
            print('\n  <> File FLOATs:\n')
            if slf1.slf.file['float'] == ('diff', 8):
                print('     + ' + slf_file1 + ' is DOUBLE PRECISION')
            else:
                print('     + ' + slf_file1 + ' is SINGLE PRECISION')
            if slf2.slf.file['float'] == ('diff', 8):
                print('     + ' + slf_file2 + ' is DOUBLE PRECISION')
            else:
                print('     + ' + slf_file2 + ' is SINGLE PRECISION')

    # ~~> File contents
    mes = '\n  <> List of variable names:\n'
    found = False
    cmn_vars = []
    mes = mes + '\n     + ' + slf_file1
    for ivar in range(len(slf1.slf.varnames)):
        if slf1.slf.varnames[ivar] in slf2.slf.varnames:
            mes = mes + '\n        = ' + slf1.slf.varnames[ivar]
            cmn_vars.append(slf1.slf.varnames[ivar])
        else:
            mes = mes + '\n        * ' + slf1.slf.varnames[ivar]
            found = True
    mes = mes + '\n     + ' + slf_file2
    for ivar in range(len(slf2.slf.varnames)):
        if slf2.slf.varnames[ivar] in slf1.slf.varnames:
            mes = mes + '\n        = ' + slf2.slf.varnames[ivar]
        else:
            mes = mes + '\n        * ' + slf2.slf.varnames[ivar]
            found = True
    if found and options.head:
        print(mes)
    if not cmn_vars:
        comparable = False
        print('\n  /!\\ no common variables. The files are not comparable.\n')

    # ~~> File reference dates and times
    if options.head:
        if np.any(np.array(slf1.slf.datetime) != np.array(slf2.slf.datetime)):
            print('\n  <> Different reference dates:')
            print('     + ' + slf_file1 + ': ' + repr(slf1.slf.datetime))
            print('     + ' + slf_file2 + ': ' + repr(slf2.slf.datetime))

    # ~~> File time frames
    mes = '\n  <> List of time frames:\n'
    found = False
    times0 = []
    times1 = []
    times2 = []
    # ~~> check if sorted times
    it1 = 1
    if len(slf1.slf.tags['times']) > 1:
        for it1 in range(len(slf1.slf.tags['times']))[1:]:
            if slf1.slf.tags['times'][it1] <= slf1.slf.tags['times'][it1 - 1]:
                break
        if slf1.slf.tags['times'][it1] > slf1.slf.tags['times'][it1 - 1]:
            it1 += 1
    it2 = 1
    if len(slf2.slf.tags['times']) > 1:
        for it2 in range(len(slf2.slf.tags['times']))[1:]:
            if slf2.slf.tags['times'][it2] <= slf2.slf.tags['times'][it2 - 1]:
                break
        if slf2.slf.tags['times'][it2] > slf2.slf.tags['times'][it2 - 1]:
            it2 += 1
    # ~~> correct if not bypassed
    if options.bypass and \
        len(slf1.slf.tags['times']) == len(slf2.slf.tags['times']):
        times0 = list(range(len(slf1.slf.tags['times'])))
    else:
        diff = np.setdiff1d(slf1.slf.tags['times'][:it1],
                            slf2.slf.tags['times'][:it2])
        if diff.size != 0:
            found = True
            mes = mes + '\n     + frames only in '+slf_file1+' : '+\
                    ', '.join(['{0:.2f}'.format(i) for i in diff])
        diff = np.setdiff1d(slf2.slf.tags['times'][:it2],
                            slf1.slf.tags['times'][:it1])
        if diff.size != 0:
            found = True
            mes = mes + '\n     + frames only in '+slf_file2+' : '+\
                    ', '.join(['{0:.2f}'.format(i) for i in diff])
        diff = np.intersect1d(slf1.slf.tags['times'][:it1],
                              slf2.slf.tags['times'][:it2])
        if diff.size != 0:
            mes = mes + '\n     + frames in both files: '+\
                  ', '.join([str(i) for i in diff])
            times1 = np.searchsorted(slf1.slf.tags['times'][:it1], diff)
            slf1.slf.tags['times'] = slf1.slf.tags['times'][times1]
            for time in range(len(slf1.slf.tags['cores']))[it1:]:
                slf1.slf.tags['cores'].remove(slf1.slf.tags['cores'][-1])
            for time in range(len(slf1.slf.tags['cores']))[::-1]:
                if time not in times1:
                    slf1.slf.tags['cores'].remove(slf1.slf.tags['cores'][time])
            times2 = np.searchsorted(slf2.slf.tags['times'][:it2], diff)
            slf2.slf.tags['times'] = slf2.slf.tags['times'][times2]
            for time in range(len(slf2.slf.tags['cores']))[it2:]:
                slf2.slf.tags['cores'].remove(slf2.slf.tags['cores'][-1])
            for time in range(len(slf2.slf.tags['cores']))[::-1]:
                if time not in times2:
                    slf2.slf.tags['cores'].remove(slf2.slf.tags['cores'][time])
            times0 = list(range(len(slf2.slf.tags['times'])))
            if options.head:
                print(mes)
        else:
            comparable = False
            print('\n  /!\\ no common time frames. '\
                    'The files are not comparable.\n')
        times0 = list(range(len(slf1.slf.tags['times'])))  # ... for instance
    if found and options.head:
        print(mes)

    # ~~> File geometries
    mes = ''
    if slf1.slf.npoin2 != slf2.slf.npoin2:
        mes = mes + '     + npoin2 = ' + str(
            slf1.slf.npoin2) + ' in ' + slf_file1
        mes = mes + '     * npoin2 = ' + str(
            slf2.slf.npoin2) + ' in ' + slf_file2
        mes = mes + '\n'
    if slf1.slf.nplan != slf2.slf.nplan:
        mes = mes + '     + nplan = ' + str(
            slf1.slf.nplan) + ' in ' + slf_file1
        mes = mes + '     * nplan = ' + str(
            slf2.slf.nplan) + ' in ' + slf_file2
        mes = mes + '\n'
    if mes != '':
        if options.head:
            print('\n  <> Geometry:\n' + mes)
        comparable = False
        print('\n  /!\\ different geometries. The files are not comparable.\n')

    if options.head:
        # ~~> File triangulations
        diff = slf1.slf.ikle2 - slf2.slf.ikle2
        if np.argwhere(diff != 0).size != 0:
            print('\n  <> 2D Triangulation:\n')
            print('     + number of mismatches: '+\
                    repr(len(np.argwhere(diff != 0).T[0])))
            print('     + mismatched elements: '+\
                    repr(np.argwhere(diff != 0).T[0][::3]))

    if options.head:
        # ~~> File geo-localisation
        diff = np.sqrt(np.power((slf1.slf.meshx-slf2.slf.meshx), 2) + \
                        np.power((slf1.slf.meshy-slf2.slf.meshy), 2))
        points = np.argwhere(diff > float(options.epsilon)).ravel()
        if points.size != 0:
            print('\n  <> Geo-Localisation:\n')
            print('     + maximum distance between points : '+\
                  str(max(diff[points])))
            pt_th = 100 * len(points) / len(diff)
            print('     + number of points above closeness threshold : '+\
                  str(len(points))+' (i.e. '+str(pt_th)+'% of points)')
            print('     + node numbers : ' +
                  repr(np.arange(slf1.slf.npoin3)[points]))

# <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
# ~~~~ Core differences ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

    if options.core and comparable:
        print('\n\nCore differences: \n' + 72 * '~' + '\n')

        found = False
        for time in times0:
            for var in cmn_vars:
                ivar = slf1.slf.varnames.index(var)
                jvar = slf2.slf.varnames.index(var)
                var1 = slf1.slf.get_variables_at(time,
                                                 [slf1.slf.varindex[ivar]])
                var2 = slf2.slf.get_variables_at(time,
                                                 [slf2.slf.varindex[jvar]])
                diff = np.absolute(var1 - var2).ravel()
                points = np.argwhere(diff > float(options.epsilon)).ravel()
                if points.size != 0:
                    found = True
                    time1 = slf1.slf.tags['times'][time]
                    time2 = slf2.slf.tags['times'][time]
                    print('\n  <> Frame: '+str(time)+' (times: '+\
                          '{0:.2f}'.format(time1)+' / '+'{0:.2f}'.format(time2)+
                          '), Variable: '+var+'\n')
                    print('     + max difference: ', max(diff[points]))
                    print('     + number of values above threshold : ' +
                          str(len(points)) + ' (i.e. ' +
                          str(100 * len(points) / len(diff)) + '% of points)')
                    print('     + node numbers :          ' +
                          repr(np.arange(slf1.slf.npoin3)[points]))
                    print('     + values at those nodes : ' +
                          repr(diff[np.arange(slf1.slf.npoin3)[points]]))
        if not found:
            print('  <> No differences above the epsilon: ' + repr(options.epsilon))


# <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
# ~~~~ Jenkins' success message ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    print('\n\nMy work is done\n\n')

    sys.exit(0)
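
# Minimal self-contained sketch of the epsilon comparison used for the core
# differences above (uses the module-level numpy import; the arrays and the
# threshold are hypothetical):
def points_above_epsilon(var1, var2, epsilon):
    """Return the flat indices where |var1 - var2| exceeds epsilon."""
    diff = np.absolute(var1 - var2).ravel()
    return np.argwhere(diff > float(epsilon)).ravel()

# points_above_epsilon(np.array([1.0, 2.0]), np.array([1.0, 2.5]), 0.1)
# -> array([1])
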
def parse_array_grid(string, size):
    """
    @brief     Decoding structure all in order
       The grid is defined by two points and an array of re-sampling steps
       The input 'size' is either:
          - in 2D a pair of 2D points ( bottom-left, top-right )
          - in 3D a pair of 2D points and a range of planes
       The input 'size' is a pair of complex points (2D or 3D) and
          a set of re-sampling numbers
       The output is an arry [..]. Each term is complicated ...
    """

    grids = []
    minz = 0.
    maxz = 0.
    minp = 0
    maxp = 0

    if len(size) == 3:
        (minx, miny), (maxx, maxy), (minp, maxp) = size
    elif len(size) == 2:
        if len(size[0]) == 2:
            (minx, miny), (maxx, maxy) = size
        else:
            (minx, miny, minz), (maxx, maxy, maxz) = size
    n_z = maxp - minp

    # ~~ Special deal of all points ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    if string == '[]':
        # arbitrary value of 20 points
        dist = (maxy - miny + maxx - minx) / 20.0
        dist = min(dist, maxx - minx)
        dist = min(dist, maxy - miny)
        x_o = (maxx + minx) / 2.0
        y_o = (maxy + miny) / 2.0
        n_x = max(2, int((maxx - minx) / dist))
        n_y = max(2, int((maxy - miny) / dist))
        dist = min(dist, (maxx - minx) / (1.0 * n_x))
        dist = min(dist, (maxy - miny) / (1.0 * n_y))
        if len(size) == 2 and len(size[0]) == 2:
            return [[(x_o - n_x * dist / 2.0, y_o - n_y * dist / 2.0),
                     (x_o + n_x * dist / 2.0, y_o + n_y * dist / 2.0),
                     [n_x, n_y]]]
        # TODO: make sure you can support this option
        elif len(size) == 2 and len(size[0]) == 3:
            z_o = (maxz + minz) / 2.0
            n_z = 10
            dizt = (maxx - minx) / (1.0 * n_z)  # arbitrary value of 10 points
            return [[(x_o - n_x * dist / 2.0, y_o - n_y * dist / 2.0, z_o \
                      - n_z * dizt / 2.0),
                     (x_o + n_x * dist / 2.0, y_o + n_y * dist / 2.0, z_o\
                      + n_z * dizt / 2.0),
                     [n_x, n_y, n_z]]]
        else:
            return [[(x_o - n_x * dist / 2.0, y_o - n_y * dist / 2.0),
                     (x_o + n_x * dist / 2.0, y_o + n_y * dist / 2.0),
                     range(minp, maxp), [n_x, n_y, n_z]]]

    # ~~ Decoding of user entrance ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    s2g = parse_array_point(string)
    if gcd(len(s2g), 3) != 3:
        raise TelemacException(\
                '... could not parse your grid: "{}". '
                'It should be triplets made of 2 points (;)(;) '
                'and an array of resampling steps {{;}}.'.format(string))

    for i in range(len(s2g) // 3):
        pta, ptb, n_p = s2g[3 * i:3 * (i + 1)]
        if len(n_p) == 2:
            grids.append([pta[0], ptb[0], n_p])
        # TODO: support a range of fixed depths as well as fixed planes
        elif len(n_p) == 3:
            z_p = '[' + str(pta[1][0]) + ':' + str(ptb[1][0]) + ']'
            grids.append([pta[0], ptb[0], parse_array_frame(z_p, n_z), n_p])

    return grids
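
# Hedged usage note for parse_array_grid: judging from the exception message
# above, the string is triplets of two points and a resampling array, e.g.
# '(0;0)(100;50){10;5}' (the exact syntax is defined by parse_array_point,
# which is not shown here):
# grids = parse_array_grid('(0;0)(100;50){10;5}', [(0., 0.), (100., 50.)])
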
Example #26
def convert_drogues_file_to_vtu(file_name_path, output_file_name_path=None):
    """
    Convert drogues TELEMAC file into .vtu format (readable by ParaView)

    @param file_name_path (string) Name of the drogues file of TELEMAC (.dat)
    @param output_file_name_path (string) Path of output file
    """
    if not path.exists(file_name_path):
        raise TelemacException('... could not find ASCII file for drogues: '
                               '{}'.format(file_name_path))
    with open(file_name_path, 'r', encoding='utf-8') as fle:
        if output_file_name_path is None:
            output_file_name_path = file_name_path
        # Title
        line = fle.readline()
        # Labels
        line = fle.readline()
        in_3d = False
        # If Z exists, 3D format, else 2D
        if line.find("Z") != -1:
            in_3d = True
        # Meta data
        line = fle.readline().split(',')
        number_of_particules = int(line[2].split('=')[1])
        time = float(line[3].split('=')[1])
        # initialize time_step to 0, time will synchronize results
        time_step = 0
        position_xyz = np.zeros([3 * number_of_particules], dtype=float)
        p_id = np.zeros([number_of_particules], dtype=int)
        state = np.zeros([number_of_particules], dtype=int)
        is_time_step = True
        while is_time_step:
            for i in range(number_of_particules):
                line = fle.readline().split(',')
                if in_3d:
                    position_xyz[3 * i] = float(line[1])
                    position_xyz[3 * i + 1] = float(line[2])
                    position_xyz[3 * i + 2] = float(line[3])
                    p_id[i] = int(line[0])
                    state[i] = int(line[4])
                else:
                    position_xyz[3 * i] = float(line[1])
                    position_xyz[3 * i + 1] = float(line[2])
                    position_xyz[3 * i + 2] = 0.
                    p_id[i] = int(line[0])
                    state[i] = int(line[3])
            write_one_step_in_time(
                output_file_name_path.split('.')[0], position_xyz, p_id, state,
                time_step, time)
            line = fle.readline().split(',')
            # Seeking for a new time step
            if line != '' and line[0].find("ZONE") != -1:
                time = float(line[3].split('=')[1])
                tmp_number_of_particules = int(line[2].split('=')[1])
                # not sure that resize is useful...
                if tmp_number_of_particules != number_of_particules:
                    number_of_particules = tmp_number_of_particules
                    position_xyz = np.resize(position_xyz,
                                             [3 * number_of_particules])
                    p_id = np.resize(p_id, [number_of_particules])
                    state = np.resize(state, [number_of_particules])
                time_step += 1
            else:
                is_time_step = False
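
# Hedged usage sketch for convert_drogues_file_to_vtu (the file name is
# illustrative; write_one_step_in_time comes from the enclosing module):
# convert_drogues_file_to_vtu('drogues.dat', 'drogues_out.dat')
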
def run_validation_python_slurm(cfg, options, report, xcpts):
    """
    Run validation for vnv Python scripts slurm mode

    @param cfg (Dict) Configuration information
    @param options (ArgumentParser) List of arguments
    @param report (Report) Time of actions
    @param xcpts () Error handler
    """
    # Building list of files to run
    list_files = get_list_python_files(cfg, options)

    if list_files == []:
        print("Nothing to run (check tags and rank)")
        return

    n_files = len(list_files)
    root = CFGS.get_root()

    if options.cleanup or options.full_cleanup:
        for ifile, py_file in enumerate(sorted(list_files)):
            clean_vnv_working_dir(py_file, full=options.full_cleanup)

        return

    # Making sure that the file is not there before first run
    jobid_file = options.id_log
    if path.exists(jobid_file):
        remove(jobid_file)

    # ~> First submission run
    options.vnv_pre = True
    options.vnv_run = True
    options.vnv_post = False
    options.vnv_check = False
    options.bypass = True

    if options.hpc_queue == '':
        raise TelemacException(
            "Option --queue is mandatary with --vnv-mode=slurm")

    print("  ~> Submission part")
    for ifile, py_file in enumerate(sorted(list_files)):
        print('\n\nValidation < {}/{} > of {}'\
              .format(ifile+1, n_files, py_file.replace(root, '<root>')))
        run_python(py_file, options, report, xcpts)

    # Removing from list files all the ones that crashed in the first
    # submission run
    run_list_files = list_files.copy()
    for error in xcpts.messages:
        if error['name'] in run_list_files:
            run_list_files.remove(error['name'])

    # ~> Waiting for jobs to finish

    jobs = {}
    jobs_ini = {}
    run_times = {}
    crashed = {'failed': [], 'timeout': []}

    # The job id file is generated by the first run; it may be missing
    # if no run was launched by the previous command
    start_time = time.time()  # defined here so elapsed_time below always exists
    if not path.exists(jobid_file):
        actual_len = 0
    else:
        # Building dictionary of jobs:
        with open(jobid_file, 'r') as f:
            for line in f.readlines():
                job_id, action_path = line.split(';')
                if job_id == '':
                    raise TelemacException(\
                       "Error in the job id file. "\
                       "Generated by hpc_runcode in systel.cfg:\n{}"
                       .format(jobid_file))
                jobs[job_id] = action_path.strip('\n')

        jobs_ini.update(jobs)

        # Check job status
        print("  ~> Waiting for completion")
        prev_len = 0
        actual_len = len(jobs)
        # Waiting time between each check in second
        wait_time = 10

        start_time = time.time()
        time.sleep(60)

    while actual_len != 0:
        # Only printing remaining jobs if there was some changes
        if prev_len != actual_len:
            print("Remaining jobs: ", len(jobs))
        t1 = time.time()
        for job_id in list(jobs.keys()):
            state = check_job_slurm(job_id)
            # Job crashed
            if state == 'failed':
                crashed['failed'].append(jobs[job_id])
                del jobs[job_id]
            # job timed out
            elif state == 'timeout':
                crashed['timeout'].append(jobs[job_id])
                del jobs[job_id]
            # Job is done
            elif state == 'success':
                run_time = get_job_time_slurm(job_id)
                run_times[jobs[job_id]] = run_time
                del jobs[job_id]
            # Otherwise job is still running
        t2 = time.time()
        # Only wait if the loop was done in less than wait_time
        if (t2 - t1) < wait_time:
            time.sleep(wait_time)
        # Update info on len
        prev_len = actual_len
        actual_len = len(jobs)

    elapsed_time = time.time() - start_time
    time_str = time.strftime("%H:%M:%S", time.gmtime(elapsed_time))

    print("Waited {} for jobs to complete".format(time_str))

    # Adding run times to the report
    for py_file, run_time in run_times.items():
        # Getting absolute name but same as in jobs
        tmp = py_file.split(sep)
        # Mascaret does not have a temporary folder
        # So not splitting at the same index
        if 'mascaret' in tmp or 'courlis' in tmp:
            abs_py_file = sep.join(tmp[:-1]) + ".py"
            action = tmp[-1]
        else:
            abs_py_file = sep.join(tmp[:-2]) + ".py"
            action = tmp[-2]

        rank = report.values[abs_py_file]['pre']['rank']

        report.add_action(abs_py_file, rank, action, run_time, True)

    # Building new list of files (without the ones that crashed)
    new_list_files = []
    for py_file in run_list_files:
        # Extract folder of validation from script name (minus extension)
        py_folder, _ = path.splitext(py_file)
        failed_action = crashed['timeout'] + crashed['failed']

        # Check if that folder is in one of the cases that crashed
        failed = False
        for action in failed_action:
            if py_folder + sep in action:
                failed = True
                break
        # If it failed, move on to the next file
        if failed:
            continue

        new_list_files.append(py_file)

    # Adding exception for all the run that crashed
    for crash_type, failed in crashed.items():
        if failed != []:
            for fail in failed:
                xcpts.add_messages([{
                    'name': fail,
                    'msg': 'The job {}'.format(crash_type)
                }])

    print("  ~> Displaying listing of all runs")
    # Displaying listings (before merging, because merging will remove the
    # temporary folder)
    for ddir in jobs_ini.values():
        run_dir = ddir.replace('\n', '')
        print('\n\nListing for {}:'\
              .format(run_dir.replace(path.realpath(root), '<root>')))
        # If cmdexec hpc mode listing is in the temporary folder
        if 'hpc_cmdexec' in cfg:
            for ffile in listdir(run_dir):
                if ffile[:4] == 'tmp_' and \
                   path.isdir(path.join(run_dir, ffile)):
                    run_dir = path.join(run_dir, ffile)
                    break

        for ffile in listdir(run_dir):
            if ffile.endswith(".out"):
                with open(path.join(run_dir, ffile), 'r',
                          encoding='utf-8') as f:
                    print(f.read())

    # If we are in hpc_cmdexec configuration (only out_telemac is in the batch
    # job)
    # Running on more pass to do the merge step
    # Second run
    if 'hpc_cmdexec' in cfg:
        print("  ~> Merging part")
        options.vnv_pre = True
        options.vnv_run = True
        options.vnv_post = False
        options.vnv_check = False
        options.bypass = True
        options.merge = True
        options.split = False
        options.run = False
        options.compileonly = False
        # Running only on jobs that finished
        for ifile, py_file in enumerate(sorted(new_list_files)):
            print('\n\nValidation < {}/{} > of {}'\
                  .format(ifile+1, n_files, py_file.replace(root, '<root>')))
            run_python(py_file, options, report, xcpts)

    # Second run
    options.vnv_pre = True
    options.vnv_run = False
    options.vnv_post = True
    options.vnv_check = True
    options.bypass = True
    print("  ~> Check + Post-traitment")
    # Running only on jobs that finished
    for ifile, py_file in enumerate(sorted(new_list_files)):
        print('\n\nValidation < {}/{} > of {}'\
              .format(ifile+1, n_files, py_file.replace(root, '<root>')))
        run_python(py_file, options, report, xcpts, time_from_report=True)
Example #28
    def add_columns(self, x, yval):
        """
        Add new columns to the csv file

        @param x columns ???
        @param yval data ???
        """
        if self.colcore is None:
            xunit = '(-)'
            xname, x_0 = x
            proc = re.match(VAR_BRACKS, xname)
            if proc:
                xname = proc.group('name').strip()
                xunit = proc.group('unit').strip()
            self.colvars = xname
            self.colunits = xunit
            self.colcore = np.array([x_0])
        elif len(x[1]) != len(self.colcore[0]):
            raise TelemacException(
                '... cannot aggregate columns of different supports: '
                '{}'.format(repr(x[0])))
        u_0 = '(-)'
        ynames, y_0 = yval
        dim = len(ynames) - 1
        # ynames[0] is an extra meta data
        if dim == 1:  # /!\ This has been checked
            # ~~> This is called for cast objects, for instance
            n_0 = ynames[1]
            for i_0 in range(len(n_0)):  # each variables
                proc = re.match(VAR_BRACKS, n_0[i_0])
                if proc:
                    n_0[i_0] = proc.group('name').strip()
                    u_0 = proc.group('unit').strip()
                self.rowvars.append(n_0[i_0])
                self.rowunits.append(u_0)
                self.colcore = np.vstack((self.colcore, y_0[i_0]))
        elif dim == 2:
            # ~~> This is called for 1d:history, for instance
            n_0, n_1 = ynames[1:]
            for i_1 in range(len(n_1)):
                for i_0 in range(len(n_0)):
                    proc = re.match(VAR_BRACKS, n_0[i_0])
                    if proc:
                        n_0[i_0] = proc.group('name').strip()
                        u_0 = proc.group('unit').strip()
                    self.rowvars.append(n_0[i_0] + ':' + str(n_1[i_1]))
                    self.rowunits.append(u_0)
                    self.colcore = np.vstack((self.colcore, y_0[i_0][i_1]))
        elif dim == 3:  # /!\ This has been checked
            # ~~> This is called for 1d:v-section, for instance
            n_0, n_1, n_2 = ynames[1:]
            for i_2 in range(len(n_2)):  # each plan
                for i_1 in range(len(n_1)):  # each time
                    for i_0 in range(len(n_0)):  # each variables
                        self.rowvars.append(n_0[i_0] + ':' + str(n_1[i_1]) +
                                            '_' + str(n_2[i_2]))
                        self.rowunits.append(u_0)
                        self.colcore = np.vstack(
                            (self.colcore, y_0[i_0][i_1][i_2]))
        elif dim == 4:
            n_0, n_1, n_2, n_3 = ynames[1:]
            for i_3 in range(len(n_3)):
                for i_2 in range(len(n_2)):
                    for i_1 in range(len(n_1)):
                        for i_0 in range(len(n_0)):
                            self.rowvars.append(n_0[i_0] + ':' +
                                                str(n_1[i_1]) + '_' +
                                                str(n_2[i_2]) + '_' +
                                                str(n_3[i_3]))
                            self.rowunits.append(u_0)
                            self.colcore = np.vstack(
                                (self.colcore, y_0[i_0][i_1][i_2][i_3]))
def run_python(py_file, options, report, xcpts, time_from_report=False):
    """
    Run a vnv Python script

    @param py_file (str) Name of the Python file to run
    @param options (ArgumentParser) Options of the script
    @param report (Report) Contains execution time
    @param xcpts () Error Handler
    @param time_from_report (bool) If true update vnv_study time array by what
    is in the report before running post
    """
    # my_vnv_study must exist for the finally clause even if the pre step fails
    my_vnv_study = None
    try:
        abs_py_file = path.abspath(py_file)
        if not path.isfile(abs_py_file):
            raise TelemacException(\
               '\nNot able to find your Python file:\n{}'\
               .format(abs_py_file))

        val_dir = path.dirname(abs_py_file)

        chdir(val_dir)
        # Importing vnv_class from py_file
        try:
            # Code for Python 3.5+
            import importlib.util
            # This allows Python scripts declared in the examples folder to be
            # loaded
            sys.path.append(val_dir)
            spec = importlib.util.spec_from_file_location(
                "vnv_module", py_file)
            vnv_stuff = importlib.util.module_from_spec(spec)
            spec.loader.exec_module(vnv_stuff)
        except:
            from importlib.machinery import SourceFileLoader

            vnv_stuff = SourceFileLoader("vnv_module", py_file).load_module()

        name = path.splitext(py_file)[0]
        my_vnv_study = vnv_stuff.VnvStudy(name, val_dir, options)

        # Pre-treatment part
        # It is always done
        my_vnv_study.pre()

        if options.api:
            pre_api(my_vnv_study)

        # Cleaning ?
        if options.cleanup or options.full_cleanup:
            my_vnv_study.clean_vnv_working_dir(full=options.full_cleanup)
            return

        # Execution part
        if options.vnv_run:
            chdir(val_dir)
            my_vnv_study.run()

            # cleaning temporary files (if no post):
            if not options.vnv_post:
                for ffile in my_vnv_study.temporary_files:
                    remove(path.join(val_dir, ffile))

        # Check part
        if options.vnv_check:
            chdir(val_dir)
            my_vnv_study.check_results()
            if options.api:
                check_api(my_vnv_study)

        # Post_treatment part
        if options.vnv_post:
            # Update time from what is in the report
            if time_from_report:
                for action, val in report.values[abs_py_file].items():
                    my_vnv_study.action_time[action] = \
                            [val['passed'], val['time']]
            chdir(val_dir)
            my_vnv_study.post()

            # cleaning temporary files:
            for ffile in my_vnv_study.temporary_files:
                remove(path.join(val_dir, ffile))

    except Exception as exc:
        if options.bypass:
            xcpts.add_messages([{'name': py_file, 'msg': str(exc)}])
        else:
            raise exc
    finally:
        # Updating report information
        if my_vnv_study is not None:
            for action, actions in my_vnv_study.action_time.items():
                report.add_action(abs_py_file, my_vnv_study.rank, action,
                                  actions[1], actions[0])
    # Cleaning up sys.path
    if val_dir in sys.path:
        sys.path.remove(val_dir)
def main():
    """ Main function of convertToSPE """

    # <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
    # ~~~~ Dependencies towards other modules ~~~~~~~~~~~~~~~~~~~~~~~~~~
    from argparse import ArgumentParser, RawDescriptionHelpFormatter
    from data_manip.formats.selafin import Selafin
    from data_manip.formats.conlim import Conlim
    from pretel.meshes import xys_locate_mesh

    # <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
    # ~~~~ Reads config file ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    print('\n\nInterpreting command line options\n' + '~' * 72 + '\n')
    parser = ArgumentParser(
        formatter_class=RawDescriptionHelpFormatter,
        description=('''\n
A script to map spectral outer model results, stored as SELAFIN files,
    onto the spatially and time-varying boundary of a spatially contained
    SELAFIN file of your choosing (your MESH).
        '''),
        usage=' (--help for help)\n---------\n       => '
        ' %(prog)s  open-bound.cli open-bound.slf in-outer-geo.slf '
        'in-outer-spec.slf out-bound.slf \n---------')
    parser.add_argument(
        "--ll2utm",
        dest="ll2utm",
        default=None,
        help="assume outer file is in lat-long and open-bound file in UTM")
    parser.add_argument("args", default='', nargs=5)
    options = parser.parse_args()

    # <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
    # ~~~~ cli+slf new mesh ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    cli_file = options.args[0]
    if not path.exists(cli_file):
        raise TelemacException('... the provided cli_file does not seem '
                               'to exist: {}\n\n'.format(cli_file))
    geo_file = options.args[1]
    if not path.exists(geo_file):
        raise TelemacException(
            '... the provided geo_file does not seem to exist: '
            '{}\n\n'.format(geo_file))

    # Read the new CLI file to get boundary node numbers
    print('   +> getting hold of the CONLIM file and of its liquid boundaries')
    cli = Conlim(cli_file)
    # Keeping only open boundary nodes
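    # (in the CONLIM file, lih == 2 flags solid-wall nodes, so everything
    # else is kept here as an open/liquid boundary)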
    bor = np.extract(cli.bor['lih'] != 2, cli.bor['n'])

    # Find corresponding (x,y) in corresponding new mesh
    print('   +> getting hold of the GEO file and of its bathymetry')
    geo = Selafin(geo_file)
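    # If --ll2utm is given, the open-boundary mesh is in UTM while the outer
    # model is in lat-long: convert the boundary coordinates accordingly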
    if options.ll2utm is not None:
        zone = int(options.ll2utm)
        x, y = to_lat_long(geo.meshx[bor - 1], geo.meshy[bor - 1], zone)
    else:
        x = geo.meshx[bor - 1]
        y = geo.meshy[bor - 1]
    xys = np.vstack((x, y)).T

    # <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
    # ~~~~ slf+spe existing res ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    slf_file = options.args[2]
    if not path.exists(slf_file):
        raise TelemacException(
            '... the provided slf_file does not seem to exist: '
            '{}\n\n'.format(slf_file))
    slf = Selafin(slf_file)
    slf.set_kd_tree()
    slf.set_mpl_tri()
    spe_file = options.args[3]
    if not path.exists(spe_file):
        raise TelemacException(
            '... the provided spe_file does not seem to exist: '
            '{}\n\n'.format(spe_file))
    spe = Selafin(spe_file)

    print('   +> support extraction')
    # Extract triangles and weights in 2D
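    # each entry of support2d holds the mesh nodes surrounding one boundary
    # point together with the interpolation weights locating it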
    support2d = []
    ibar = 0
    pbar = ProgressBar(maxval=len(xys)).start()
    for xyi in xys:
        support2d.append(
            xys_locate_mesh(xyi, slf.ikle2, slf.meshx, slf.meshy, slf.tree,
                            slf.neighbours))
        ibar += 1
        pbar.update(ibar)
    pbar.finish()

    # <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
    # ~~~~ writes BND header ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

    bnd_file = options.args[4]
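    # An empty Selafin instance is used as a writer; its fole dictionary
    # describes the output stream (file handle, endianness and float size)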
    bnd = Selafin('')
    bnd.fole = {
        'hook': open(bnd_file, 'wb'),
        'name': bnd_file,
        'endian': ">",  # big endian
        'float': ('f', 4),  # single precision
    }

    # Meta data and variable names
    bnd.title = spe.title
    # spectrum for new locations / nodes
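    # one 16-character variable name per open-boundary node, e.g.
    # 'F00 PT2D000123', pairing the spectrum index with the node number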
    for i in range(len(bor)):
        bnd.varnames.append(
            ('F' + ('00' + str(i))[-2:] + ' PT2D' +
             ('000000' + str(bor[i]))[-6:] + '                ')[:16])
        bnd.varunits.append('UI              ')
    bnd.nbv1 = len(bnd.varnames)
    bnd.nvar = bnd.nbv1
    bnd.varindex = range(bnd.nvar)

    # sizes and mesh connectivity / spectrum
    bnd.nplan = spe.nplan
    bnd.ndp2 = spe.ndp2
    bnd.ndp3 = bnd.ndp2
    bnd.npoin2 = spe.npoin2
    bnd.npoin3 = spe.npoin3
    bnd.iparam = spe.iparam
    bnd.ipob2 = spe.ipob2
    bnd.ikle2 = spe.ikle2
    # Last few numbers
    bnd.nelem2 = len(bnd.ikle2)
    bnd.nelem3 = bnd.nelem2
    bnd.ipob3 = bnd.ipob2
    bnd.ikle3 = bnd.ikle2
    # Mesh coordinates
    bnd.meshx = spe.meshx
    bnd.meshy = spe.meshy

    print('   +> writing header')
    # Write header
    bnd.append_header_slf()

    # <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
    # ~~~~ writes BND core ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

    print('   +> setting variables')
    # TIME and DATE extraction
    bnd.datetime = spe.datetime
    bnd.tags['times'] = spe.tags['times']

    # pointer initialisation
    f = spe.file['hook']
    endian = spe.file['endian']
    ftype, fsize = spe.file['float']

    # Identify variables (required for support2d geo-locations)
    specloc = []
    for n, _ in support2d:
        specloc.extend(n)
    vars_indexes = np.unique(specloc)
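    # allocate the read buffers with the same precision as the source file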
    if fsize == 4:
        z = np.zeros((len(vars_indexes), spe.npoin2), dtype=np.float32)
        data = np.zeros(spe.npoin2, dtype=np.float32)
    else:
        z = np.zeros((len(vars_indexes), spe.npoin2), dtype=np.float64)
        data = np.zeros(spe.npoin2, dtype=np.float64)

    # Read / Write data, one time step at a time to support large files
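    # each record in the SELAFIN core is framed by 4-byte Fortran markers,
    # hence the f.seek(4, 1) calls surrounding every read or skip below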
    print('   +> reading / writing variables')
    pbar = ProgressBar(maxval=len(spe.tags['times'])).start()
    for itime in range(len(spe.tags['times'])):
        f.seek(spe.tags['cores'][itime])  # jump to the start of frame [itime]
        # skip the time-stamp record (marker + time value + marker)
        f.seek(4 + fsize + 4, 1)
        bnd.append_core_time_slf(itime)

        # Extract relevant spectrum, where
        #  vars_indexes only contains the relevant nodes
        #  jvar varies from 0 to len(vars_indexes)
        jvar = 0
        for ivar in range(spe.nvar):
            f.seek(4, 1)  # leading record marker
            if ivar in vars_indexes:
                z[jvar, :] = unpack(endian + str(spe.npoin2) + ftype,
                                    f.read(fsize * spe.npoin2))
                jvar += 1
            else:
                # skip this record's data to stay aligned with the file
                f.seek(fsize * spe.npoin2, 1)
            f.seek(4, 1)  # trailing record marker

        # linear interpolation: combine the spectra of the supporting nodes
        # (b_n) with their weights (l_n) for each boundary location
        for b_n, l_n in support2d:
            data[:] = 0.
            for inod in range(len(b_n)):
                jvar = np.where(vars_indexes == b_n[inod])[0][0]
                data += l_n[inod] * z[jvar, :]
            bnd.append_core_vars_slf([data])

        pbar.update(itime)
    pbar.finish()

    # Close bnd_file
    bnd.fole['hook'].close()

    print('   +> writing out the file with the coordinates to impose')
    dat = [str(len(bor)) + ' 0']
    for i in np.sort(bor):
        dat.append(
            str(i) + ' ' + repr(geo.meshx[i - 1]) + ' ' +
            repr(geo.meshy[i - 1]) + ' 0.0')
    put_file_content(bnd_file + '.dat', dat)

    # <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
    # ~~~~ Jenkins' success message ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    print('\n\nMy work is done\n\n')

    sys.exit(0)
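
# Typical invocation, matching the usage string above (file names are
# placeholders):
#     python convertToSPE.py open-bound.cli open-bound.slf \
#         in-outer-geo.slf in-outer-spec.slf out-bound.slf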