def __init__(self,fileName):
      """Load a DELWAQ steering file and the grid files it references.

      fileName -- path to the DELWAQ steering file; the process exits
      with code 1 when the steering file or any referenced file is
      missing, or when the grid sizes are inconsistent.
      """
      # ~~> Read the steering file ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
      if not path.exists(fileName):
         print '... Could not file your DELWAQ file: ',fileName
         sys.exit(1)
      # key/value pairs parsed out of the steering file
      self.dwqList = self.parseDWQ(getFileContent(fileName))

      # ~~> Read the geometry file ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
      fle = self.dwqList['grid-indices-file']
      if not path.exists(fle):
         print '...Could not find the GEO file: ',fle
         sys.exit(1)
      self.geo = SELAFIN(fle)
      self.NPOIN3 = int(self.dwqList['grid-cells-first-direction'])
      # cross-check the declared node count against the SELAFIN geometry
      if self.NPOIN3 != self.geo.NPOIN3:
         print '...In consistency in numbers with GEO file: ',self.NPOIN3,self.geo.NPOIN3
         sys.exit(1)
      self.NSEG3 = int(self.dwqList['grid-cells-second-direction'])

      # ~~> Read the CONLIM file ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
      fle = self.dwqList['grid-coordinates-file']
      if not path.exists(fle):
         print '...Could not find the CONLIM file: ',fle
         sys.exit(1)
      self.conlim = CONLIM(fle)

      # ~~> Time records ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
      # start / stop / step of the hydrodynamic records (integer seconds,
      # presumably -- TODO confirm the unit against the DELWAQ docs)
      self.HYDRO0T = int(self.dwqList['hydrodynamic-start-time'])
      self.HYDROAT = int(self.dwqList['hydrodynamic-stop-time'])
      self.HYDRODT = int(self.dwqList['hydrodynamic-timestep'])
      # number of records covering [start,stop] at the given step
      self.HYDROIT = 1 + ( self.HYDROAT-self.HYDRO0T ) / self.HYDRODT
      self.HYDRO00 = 0
      self.tfrom = self.HYDRO0T
      self.tstop = self.HYDROAT
Example #2
0
    def __init__(self, fileName):
        """Load a DELWAQ steering file and the grid files it references.

        fileName -- path to the DELWAQ steering file; the process exits
        with code 1 when the steering file or any referenced file is
        missing, or when the grid sizes are inconsistent.
        """

        # ~~> Read the steering file ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
        if not path.exists(fileName):
            print "... Could not file your DELWAQ file: ", fileName
            sys.exit(1)
        # key/value pairs parsed out of the steering file
        self.dwqList = self.parseDWQ(getFileContent(fileName))

        # ~~> Read the geometry file ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
        fle = self.dwqList["grid-indices-file"]
        if not path.exists(fle):
            print "...Could not find the GEO file: ", fle
            sys.exit(1)
        self.geo = SELAFIN(fle)
        self.NPOIN3 = int(self.dwqList["grid-cells-first-direction"])
        # cross-check the declared node count against the SELAFIN geometry
        if self.NPOIN3 != self.geo.NPOIN3:
            print "...In consistency in numbers with GEO file: ", self.NPOIN3, self.geo.NPOIN3
            sys.exit(1)
        self.NSEG3 = int(self.dwqList["grid-cells-second-direction"])

        # ~~> Read the CONLIM file ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
        fle = self.dwqList["grid-coordinates-file"]
        if not path.exists(fle):
            print "...Could not find the CONLIM file: ", fle
            sys.exit(1)
        self.conlim = CONLIM(fle)

        # ~~> Time records ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
        # start / stop / step of the hydrodynamic records (integer seconds,
        # presumably -- TODO confirm the unit against the DELWAQ docs)
        self.HYDRO0T = int(self.dwqList["hydrodynamic-start-time"])
        self.HYDROAT = int(self.dwqList["hydrodynamic-stop-time"])
        self.HYDRODT = int(self.dwqList["hydrodynamic-timestep"])
        # number of records covering [start,stop] at the given step
        self.HYDROIT = 1 + (self.HYDROAT - self.HYDRO0T) / self.HYDRODT
        self.HYDRO00 = 0
        self.tfrom = self.HYDRO0T
        self.tstop = self.HYDROAT
Example #3
0
def getXYn(file):
    # TODO: Read the whole header, for the time being head is copied
    #       over
    # TODO: Read multiple variables depending on type and on a list

    # ~~ Get all ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    core = getFileContent(file)

    # ~~ Parse head ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    icore = 0
    fileType = None
    while re.match(ken_header, core[icore]):
        # ~~> instruction FileType
        proc = re.match(asc_FileType, core[icore])
        if proc: fileType = proc.group('type').lower()
        # ... more instruction coming ...
        icore += 1
    head = core[0:icore]
    if fileType == None:
        proc = re.match(var_3dbl, core[icore] + ' ')
        if not proc:
            proc = re.match(var_2dbl, core[icore] + ' ')
            if not proc:
                print '\nCould not parse the first record: ' + core[icore]
                sys.exit(1)
            else:
                fileType = 'xy'
        else:
            fileType = 'xyz'

    # /!\ icore starts the body

    # ~~ Parse body ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    # This is also fairly fast, so you might not need a progress bar
    xyz = []  #; pbar = ProgressBar(maxval=len(core)).start()
    while icore < len(core):
        if fileType == 'xy':
            proc = re.match(var_2dbl, core[icore] + ' ')
            if not proc:
                print '\nCould not parse the following xyz record: ' + core[
                    icore]
                sys.exit(1)
            xyz.append(
                [float(proc.group('number1')),
                 float(proc.group('number2'))])
        elif fileType == 'xyz':
            proc = re.match(var_3dbl, core[icore] + ' ')
            if not proc:
                print '\nCould not parse the following xyz record: ' + core[
                    icore]
                sys.exit(1)
            xyz.append([
                float(proc.group('number1')),
                float(proc.group('number2')),
                float(proc.group('number3'))
            ])
        icore += 1
    #pbar.finish()

    return head, fileType, xyz
def getINSEL(file):
   # ~~ Get all ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
   core = getFileContent(file)
   if not re.match(dat_footer,core[len(core)-1]):
      print '\nCould not parse the following end line of the file: '+core[len(core)-1]
      sys.exit(1)

   # ~~ First scan at INSEL and DAMM ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
   # This is also fairly fast, so you might not need a progress bar
   core.pop(len(core)-1)
   poly = []; vals = []; typ = []; npoin = 0
   iline = 0; xyi = []; val = []; fileType = True
   while iline < len(core):
      proco = re.match(dat_openh,core[iline])
      if proco: t = 0
      procc = re.match(dat_closh,core[iline])
      if procc: t = 1
      if proco or procc:
         iline += 1
         if xyi != []:
            poly.append(xyi); npoin += len(xyi); typ.append(t); vals.append(val)
            xyi = []; val = []
      else:
         proc = re.match(var_3dbl,core[iline].strip())
         if proc:
            xyi.append((proc.group('number1'),proc.group('number2')))
            val.append(proc.group('number3'))
         else:
            fileType = False
            proc = re.match(var_2dbl,core[iline].strip())
            if proc:
               xyi.append((proc.group('number1'),proc.group('number2')))
               val.append('')
            else:
               print '\nCould not parse the following polyline record: '+core[iline]
               sys.exit(1)
      iline += 1
   poly.append(xyi); npoin += len(xyi); typ.append(t); vals.append(val)

   # ~~ Second scan at INSEL and DAMM ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
   # This is also fairly fast, so you might not need a progress bar
   if fileType:
      for pline in range(len(poly)):
         for iline in range(len(poly[pline])):
            a,b = poly[pline][iline]
            poly[pline][iline] = [ float(a),float(b) ]
            vals[pline][iline] = [ float(c) ]
         poly[pline] = np.asarray(poly[pline])
   else:
      for pline in range(len(poly)):
         for iline in range(len(poly[pline])):
            a,b = poly[pline][iline]
            poly[pline][iline] = [ float(a),float(b) ]
         poly[pline] = np.asarray(poly[pline])
         vals = []

   return fileType,npoin,poly,vals,typ
Example #5
0
def getLQD(file):
    # ~~ Get all ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    core = getFileContent(file)

    # ~~ Parse head ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    icore = 0
    while re.match(lqd_header, core[icore]):
        icore += 1
    head = core[0:icore]
    # /!\ icore starts the body

    # ~~ Parse variables ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    jcore = icore + 1
    while icore < len(core) and jcore < len(core):
        if re.match(lqd_header, core[icore]):
            icore += 1
            jcore += 1
            continue
        if re.match(lqd_header, core[jcore]):
            jcore += 1
            continue
        core[icore].replace(",", " ")
        core[jcore].replace(",", " ")
        # ~~> Variable header
        if core[icore].split()[0].upper() != "T":
            print "\nThought I would find T for this LQD file on this line: " + core[
                icore]
            sys.exit(1)
        if len(core[icore].split()) != len(core[jcore].split()):
            print "\nThought I to corresponding units for this LQD file on this line: " + core[
                jcore]
            sys.exit(1)
        vrs = zip(core[icore].upper().split(), core[jcore].upper().split())

    # ~~ Size valid values ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    icore = jcore + 1
    while icore < len(core):
        if not re.match(lqd_header, core[jcore]):
            icore += 1

    # ~~ Parse body ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    # This is also fairly fast, so you might not need a progress bar
    t = np.zeros(jcore - icore)
    z = np.zeros(len(vrs) - 1, jcore - icore)
    itime = 0
    for icore in core[jcore + 1:]:
        if re.match(lqd_header, icore):
            continue
        values = icore.replace(",", " ").split()
        t[itime] = float(values[0])
        for ivar in range(len(values[1:])):
            z[itime][ivar] = float(values[ivar])

    return head, vrs[1:], t, z
def getXYn(file):
   """Read an ASCII point file and return (head, fileType, xyz).

   head is the raw header block, fileType is 'xy' or 'xyz' (either
   declared in the header or sniffed from the first record), and xyz
   is a list of [x,y] or [x,y,z] floats, one entry per record.
   """
   # TODO: Read the whole header, for the time being head is copied
   #       over
   # TODO: Read multiple variables depending on type and on a list

   # ~~ Get all ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
   core = getFileContent(file)

   # ~~ Parse head ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
   icore = 0; fileType = None
   while re.match(ken_header,core[icore]):
      # ~~> instruction FileType
      proc = re.match(asc_FileType,core[icore])
      if proc: fileType = proc.group('type').lower()
      # ... more instruction coming ...
      icore += 1
   head = core[0:icore]
   # no declared type: sniff the first record (3 floats => xyz, 2 => xy)
   if fileType == None:
      proc = re.match(var_3dbl,core[icore]+' ')
      if not proc:
         proc = re.match(var_2dbl,core[icore]+' ')
         if not proc:
            print '\nCould not parse the first record: '+core[icore]
            sys.exit(1)
         else: fileType = 'xy'
      else: fileType = 'xyz'

   # /!\ icore starts the body

   # ~~ Parse body ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
   # This is also fairly fast, so you might not need a progress bar
   xyz = [] #; pbar = ProgressBar(maxval=len(core)).start()
   while icore < len(core):
      if fileType == 'xy':
         proc = re.match(var_2dbl,core[icore]+' ')
         if not proc:
            print '\nCould not parse the following xyz record: '+core[icore]
            sys.exit(1)
         xyz.append([float(proc.group('number1')),float(proc.group('number2'))])
      elif fileType == 'xyz':
         proc = re.match(var_3dbl,core[icore]+' ')
         if not proc:
            print '\nCould not parse the following xyz record: '+core[icore]
            sys.exit(1)
         xyz.append([float(proc.group('number1')),float(proc.group('number2')),float(proc.group('number3'))])
      icore += 1
   #pbar.finish()

   return head,fileType,xyz
Example #7
0
def getLQD(file):
   # ~~ Get all ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
   core = getFileContent(file)

   # ~~ Parse head ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
   icore = 0
   while re.match(lqd_header,core[icore]): icore += 1
   head = core[0:icore]
   # /!\ icore starts the body

   # ~~ Parse variables ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
   jcore = icore+1
   while icore < len(core) and jcore < len(core):
      if re.match(lqd_header,core[icore]):
         icore += 1; jcore += 1
         continue
      if re.match(lqd_header,core[jcore]):
         jcore += 1
         continue
      core[icore].replace(',',' ')
      core[jcore].replace(',',' ')
      # ~~> Variable header
      if core[icore].split()[0].upper() != 'T':
         print '\nThought I would find T for this LQD file on this line: '+core[icore]
         sys.exit(1)
      if len(core[icore].split()) != len(core[jcore].split()):
         print '\nThought I to corresponding units for this LQD file on this line: '+core[jcore]
         sys.exit(1)
      vrs = zip( core[icore].upper().split(),core[jcore].upper().split() )

   # ~~ Size valid values ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
   icore = jcore+1
   while icore < len(core):
      if not re.match(lqd_header,core[jcore]): icore += 1

   # ~~ Parse body ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
   # This is also fairly fast, so you might not need a progress bar
   t = np.zeros( jcore-icore )
   z = np.zeros( len(vrs)-1,jcore-icore )
   itime = 0
   for icore in core[jcore+1:]:
      if re.match(lqd_header,icore): continue
      values = icore.replace(',',' ').split()
      t[itime] = float(values[0])
      for ivar in range(len(values[1:])): z[itime][ivar] = float(values[ivar])

   return head,vrs[1:],t,z
   def __init__(self,fileName):
      """Read a boundary-condition file into a numpy record array.

      fileName -- path to the file; an empty string builds an empty
      object (only self.fileName is set besides the local dtype).
      """
      self.fileName = fileName
      # ~~> Columns of integers and floats
      # fix: this used to read "DTYPE = np.dtype=[...]", which both left
      # DTYPE as a plain list AND clobbered numpy's module-level 'dtype'
      DTYPE = np.dtype([('lih','<i4'),('liu','<i4'),('liv','<i4'), \
                        ('h','<f4'),('u','<f4'),('v','<f4'),('au','<f4'), \
                        ('lit','<i4'),('t','<f4'),('at','<f4'),('bt','<f4'), \
                        ('n','<i4'),('c','<i4')])

      if fileName != '':
         # ~~> Number of boundary points ( tuple() necessary for dtype parsing )
         core = [ tuple(c.strip().split()[0:13]) for c in getFileContent(fileName) ]
         self.NPTFR = len(core)
         self.BOR = np.array( core, DTYPE )
         # ~~> Dictionary of KFRGL: 0-based global node number -> boundary rank
         self.KFRGL = dict(zip( self.BOR['n']-1,range(self.NPTFR) ))

         # ~~> Filtering indices
         # fix: np.int is a removed alias of the builtin int
         self.INDEX = np.array( range(self.NPTFR),dtype=int )
 def getFileContent(self,fileName):
    """Load a CSV sortie file into self.sortie, exiting when missing.

    fileName -- path to the CSV file; exits with code 1 if absent.
    """
    if not path.exists(fileName):
       print '... could not find your CSV file: ',fileName
       sys.exit(1)
    # delegates to the module-level getFileContent for the actual read
    self.sortie = getFileContent(fileName)
Example #10
0
def translateCAS(cas, frgb):
    """Write side-by-side French and English versions of a CAS file.

    cas  -- path to the CAS steering file
    frgb -- dictionary with 'FR' and 'GB' keyword translation maps

    Returns the pair of file names written: (cas+'.fr', cas+'.gb').
    """
    casLines = getFileContent(cas)

    # first pass: strip comments and join all keyword text into one stream
    core = []
    for i in range(len(casLines)):
        # ~~> scan through to remove all comments
        casLines[i] = casLines[i].replace('"""', "'''").replace('"', "'")
        proc = re.match(key_comment, casLines[i] + "/")
        head = proc.group("before").strip()
        core.append(head)
    casStream = " ".join(core)

    # second pass: translate line by line, consuming casStream in step
    frLines = []
    gbLines = []
    for i in range(len(casLines)):

        # ~~> split comments
        casLines[i] = casLines[i].replace('"""', "'''").replace('"', "'")
        proc = re.match(key_comment, casLines[i] + "/")
        head = proc.group("before").strip()
        tail = proc.group("after").rstrip("/").strip()  # /!\ is not translated
        # ~~ special keys starting with '&'
        p = re.match(key_none, head + " ")
        if p:
            head = ""
            tail = casLines[i].strip()
        frline = head
        gbline = head

        # a non-empty line with an exhausted stream means we lost sync
        if head != "" and casStream == "":
            raise Exception(
                [{"name": "translateCAS", "msg": "could not translate this cas file after the line:\n" + head}]
            )
        # ~~> this is a must for multiple keywords on one line
        while casStream != "":
            proc = re.match(key_equals, casStream)
            if not proc:
                raise Exception(
                    [
                        {
                            "name": "scanCAS",
                            "msg": "... hmmm, did not see this one coming ...\n   around there :" + casStream[:100],
                        }
                    ]
                )
            kw = proc.group("key").strip()
            if kw not in head:
                break  # move on to next line

            # ~~> translate the keyword (first occurrence only)
            head = head.replace(kw, "", 1)
            if kw.upper() in frgb["GB"]:
                frline = frline.replace(kw, frgb["GB"][kw], 1)
            if kw.upper() in frgb["FR"]:
                gbline = gbline.replace(kw, frgb["FR"][kw], 1)

            # ~~> look for less obvious keywords: consume this keyword's values
            casStream = proc.group("after")  # still hold the separator
            proc = re.match(val_equals, casStream)
            if not proc:
                raise Exception([{"name": "translateCAS", "msg": "no value to keyword: " + kw}])
            while proc:
                casStream = proc.group("after")  # still hold the separator
                proc = re.match(val_equals, casStream)

        # final append
        if frline != "":
            frline = " " + frline
        frLines.append(frline + tail)
        if gbline != "":
            gbline = " " + gbline
        gbLines.append(gbline + tail)

    # ~~ print FR and GB versions of the CAS file
    putFileContent(cas + ".fr", frLines)
    putFileContent(cas + ".gb", gbLines)

    return cas + ".fr", cas + ".gb"
def translateCAS(cas,frgb):
   """Write side-by-side French and English versions of a CAS file.

   cas  -- path to the CAS steering file
   frgb -- dictionary with 'FR' and 'GB' keyword translation maps

   Returns the pair of file names written: (cas+'.fr', cas+'.gb').
   """
   casLines = getFileContent(cas)

   # first pass: strip comments and join all keyword text into one stream
   core = []
   for i in range(len(casLines)):
      # ~~> scan through to remove all comments
      casLines[i] = casLines[i].replace('"""',"'''").replace('"',"'")
      proc = re.match(key_comment,casLines[i]+'/')
      head = proc.group('before').strip()
      core.append(head)
   casStream = ' '.join(core)

   # second pass: translate line by line, consuming casStream in step
   frLines = []; gbLines = []
   for i in range(len(casLines)):

      # ~~> split comments
      casLines[i] = casLines[i].replace('"""',"'''").replace('"',"'")
      proc = re.match(key_comment,casLines[i]+'/')
      head = proc.group('before').strip()
      tail = proc.group('after').rstrip('/').strip()  # /!\ is not translated
      # ~~ special keys starting with '&'
      p = re.match(key_none,head+' ')
      if p:
         head = ''
         tail = casLines[i].strip()
      frline = head
      gbline = head

      # a non-empty line with an exhausted stream means we lost sync
      if head != '' and casStream == '':
         raise Exception([{'name':'translateCAS','msg':'could not translate this cas file after the line:\n'+head}])
      # ~~> this is a must for multiple keywords on one line
      while casStream != '':
         proc = re.match(key_equals,casStream)
         if not proc:
            raise Exception([{'name':'scanCAS','msg':'... hmmm, did not see this one coming ...\n   around there :'+casStream[:100]}])
         kw = proc.group('key').strip()
         if kw not in head: break  # move on to next line

         # ~~> translate the keyword (first occurrence only)
         head = head.replace(kw,'',1)
         if kw.upper() in frgb['GB']: frline = frline.replace(kw,frgb['GB'][kw],1)
         if kw.upper() in frgb['FR']: gbline = gbline.replace(kw,frgb['FR'][kw],1)

         # ~~> look for less obvious keywords: consume this keyword's values
         casStream = proc.group('after')   # still hold the separator
         proc = re.match(val_equals,casStream)
         if not proc:
            raise Exception([{'name':'translateCAS','msg':'no value to keyword: '+kw}])
         while proc:
            casStream = proc.group('after')   # still hold the separator
            proc = re.match(val_equals,casStream)

      # final append
      if frline != '': frline = ' ' + frline
      frLines.append(frline + tail)
      if gbline != '': gbline = ' ' + gbline
      gbLines.append(gbline + tail)

   # ~~ print FR and GB versions of the CAS file
   putFileContent(cas+'.fr',frLines)
   putFileContent(cas+'.gb',gbLines)

   return cas+'.fr',cas+'.gb'
      sys.exit(1)

# <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
# ~~~~ Works for all configurations unless specified ~~~~~~~~~~~~~~~
   # parse every configuration from the user-supplied config file
   cfgs = parseConfigFile(options.configFile,options.configName)

   #  /!\  for testing purposes ... no real use
   for cfgname in cfgs:
      # still in lower case
      # default the root directory to the current working directory
      if not cfgs[cfgname].has_key('root'): cfgs[cfgname]['root'] = PWD
      if options.rootDir != '': cfgs[cfgname]['root'] = options.rootDir
      # parsing for proper naming
      if options.rank != '': cfgs[cfgname]['val_rank'] = options.rank
      cfg = parseConfig_ValidateTELEMAC(cfgs[cfgname])

      debug = True

      for mod in cfg['VALIDATION']:
# ~~ Scans all CAS files to launch validation ~~~~~~~~~~~~~~~~~~~~~~
         print '\n\nConfiguration ' + cfgname + ', Module '+ mod + '\n'+'~'*72+'\n'
         print '... reading module dictionary'
         # the module dictionary lives at <module path>/lib/<module>.dico
         frgb,dico = scanDICO(path.join(path.join(cfg['MODULES'][mod]['path'],'lib'),mod+'.dico'))
         for casFile in cfg['VALIDATION'][mod]:
            print '... CAS file: ',casFile
            casKeys = readCAS(scanCAS(getFileContent(casFile)),dico,frgb)
               #/!\ for testing purposes ... no real use.
               #/!\ Note that casKeys is made of lines,(keys,values)

   sys.exit(0)

Example #13
0
def scanDICO(dicoFile):
    """Parse a TELEMAC dictionary (.dico) file.

    dicoFile -- path to the dictionary file.

    Returns (dico, keywords): dico maps French keyword names to English
    ones under 'FR' (and the reverse under 'GB', plus the file path
    under 'DICO'); keywords maps each French keyword to the dictionary
    of its attributes (NOM, NOM1, and whatever follows until the next
    NOM entry).
    """
    keylist = []
    dicoLines = getFileContent(dicoFile)
    # ~~ buddle continuations (long strings) and remove comments and empty lines
    core = []
    i = -1
    while i < len(dicoLines) - 1:
        i = i + 1
        line = ''
        l = dicoLines[i].strip()
        #proc = re.match(key_comment,l)
        #if proc: l = proc.group('before').strip() + ' '
        # skip comment ('/'), empty, and non-key lines
        if l.strip()[0:1] == '/': continue
        proc = re.match(emptyline, l)
        if proc: continue
        proc = re.match(key_none, l)
        if proc: continue
        proc = re.match(entryquote, l)
        line = proc.group('before')
        l = proc.group('after')
        # stitch quoted strings that continue over several physical lines,
        # normalising outer quotes to single and inner ones to double
        while l != '':
            if l[0:1] == '"':
                proc = re.match(exitdquote, l + ' ')
                if proc:
                    line = line + "'" + proc.group('before').replace("'",
                                                                     '"') + "'"
                    proc = re.match(entryquote, proc.group('after').strip())
                    line = line + proc.group('before')
                    l = proc.group('after').strip()
                    # NOTE(review): looks like leftover debug output -- confirm
                    # it can be removed
                    print '>', l
                else:
                    # unterminated quote: pull in the next physical line
                    i = i + 1
                    l = l.strip() + ' ' + dicoLines[i].strip()
            elif l[0:1] == "'":
                proc = re.match(exitsquote, l + ' ')
                if proc:
                    line = line + "'" + proc.group('before').replace("'",
                                                                     '"') + "'"
                    proc = re.match(entryquote, proc.group('after').strip())
                    line = line + proc.group('before')
                    l = proc.group('after').strip()
                else:
                    # unterminated quote: pull in the next physical line
                    i = i + 1
                    l = l.strip() + ' ' + dicoLines[i].strip()
        core.append(line)
    dicoStream = (' '.join(core)).replace('  ', ' ').replace('""', '"')
    # ~~ clean values to keep only the keys
    while dicoStream != '':
        # ~~ non-key
        proc = re.match(key_none, dicoStream)
        if proc:
            dicoStream = proc.group('after')
            continue
        # ~~ key
        proc = re.match(key_equals, dicoStream)
        if not proc: break
        kw = proc.group('key').strip()
        if kw not in dicokeys:
            print 'unknown key ', kw, proc.group('after'), dicoStream
            sys.exit(1)
        dicoStream = proc.group('after')  # still hold the separator
        # ~~ val
        proc = re.match(val_equals, dicoStream)
        if not proc:
            print 'no value to keyword ', kw
            sys.exit(1)
        val = []
        while proc:
            # strip the surrounding quotes from quoted values
            if proc.group('val')[0] == "'":
                val.append(proc.group('val')[1:len(proc.group('val')) - 1])
            else:
                val.append(proc.group('val'))
            dicoStream = proc.group('after')  # still hold the separator
            proc = re.match(val_equals, dicoStream)
        keylist.append([kw, val])
    # ~~ sort out the groups, starting with 'NOM'
    dico = {
        'FR': {},
        'GB': {},
        'DICO': dicoFile
    }
    keywords = {}
    while keylist != []:
        # NOTE(review): 'and' only raises when BOTH entries are wrong;
        # 'or' may have been intended here -- confirm before changing
        if keylist[0][0] != 'NOM' and keylist[1][0] != 'NOM1':
            print 'could not read NOM or NOM1 from ', keylist[0][1]
            sys.exit(1)
        # NOM is the French name, NOM1 the English one
        dico['FR'].update({
            keylist[0][1][0].replace('"', "'"):
            keylist[1][1][0].replace('"', "'")
        })
        dico['GB'].update({
            keylist[1][1][0].replace('"', "'"):
            keylist[0][1][0].replace('"', "'")
        })
        key = keylist[0][1][0].replace('"', "'")
        words = {}
        words = {'NOM': keylist[0][1]}
        keylist.pop(0)
        # collect this keyword's attributes until the next NOM entry
        while keylist != []:
            if keylist[0][0] == 'NOM': break
            words.update({keylist[0][0]: keylist[0][1]})
            keylist.pop(0)
        keywords.update({key: words})

    return dico, keywords
Example #14
0
 def getFileContent(self, fileName):
     """Load a CSV sortie file into self.sortie, exiting when missing.

     fileName -- path to the CSV file; exits with code 1 if absent.
     """
     if not path.exists(fileName):
         print '... could not find your CSV file: ', fileName
         sys.exit(1)
     # delegates to the module-level getFileContent for the actual read
     self.sortie = getFileContent(fileName)
Example #15
0
# <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
# ~~~~ Works for all configurations unless specified ~~~~~~~~~~~~~~~
    # parse every configuration from the user-supplied config file
    cfgs = parseConfigFile(options.configFile, options.configName)

    #  /!\  for testing purposes ... no real use
    for cfgname in cfgs:
        # still in lower case
        # default the root directory to the current working directory
        if not cfgs[cfgname].has_key('root'): cfgs[cfgname]['root'] = PWD
        if options.rootDir != '': cfgs[cfgname]['root'] = options.rootDir
        # parsing for proper naming
        if options.rank != '': cfgs[cfgname]['val_rank'] = options.rank
        cfg = parseConfig_ValidateTELEMAC(cfgs[cfgname])

        debug = True

        for mod in cfg['VALIDATION']:
            # ~~ Scans all CAS files to launch validation ~~~~~~~~~~~~~~~~~~~~~~
            print '\n\nConfiguration ' + cfgname + ', Module ' + mod + '\n' + '~' * 72 + '\n'
            print '... reading module dictionary'
            # the module dictionary lives at <module path>/lib/<module>.dico
            frgb, dico = scanDICO(
                path.join(path.join(cfg['MODULES'][mod]['path'], 'lib'),
                          mod + '.dico'))
            for casFile in cfg['VALIDATION'][mod]:
                print '... CAS file: ', casFile
                casKeys = readCAS(scanCAS(getFileContent(casFile)), dico, frgb)
                #/!\ for testing purposes ... no real use.
                #/!\ Note that casKeys is made of lines,(keys,values)

    sys.exit(0)
Example #16
0
def translateCAS(cas, frgb):
    """Write side-by-side French and English versions of a CAS file.

    cas  -- path to the CAS steering file
    frgb -- dictionary with 'FR' and 'GB' keyword translation maps

    Returns the pair of file names written: (cas+'.fr', cas+'.gb').
    """
    casLines = getFileContent(cas)

    # first pass: strip comments and join all keyword text into one stream
    core = []
    for i in range(len(casLines)):
        # ~~> scan through to remove all comments
        casLines[i] = casLines[i].replace('"""', "'''").replace('"', "'")
        proc = re.match(key_comment, casLines[i] + '/')
        head = proc.group('before').strip()
        core.append(head)
    casStream = ' '.join(core)

    # second pass: translate line by line, consuming casStream in step
    frLines = []
    gbLines = []
    for i in range(len(casLines)):

        # ~~> split comments
        casLines[i] = casLines[i].replace('"""', "'''").replace('"', "'")
        proc = re.match(key_comment, casLines[i] + '/')
        head = proc.group('before').strip()
        tail = proc.group('after').rstrip('/').strip()  # /!\ is not translated
        # ~~ special keys starting with '&'
        p = re.match(key_none, head + ' ')
        if p:
            head = ''
            tail = casLines[i].strip()
        frline = head
        gbline = head

        # a non-empty line with an exhausted stream means we lost sync
        if head != '' and casStream == '':
            raise Exception([{
                'name':
                'translateCAS',
                'msg':
                'could not translate this cas file after the line:\n' + head
            }])
        # ~~> this is a must for multiple keywords on one line
        while casStream != '':
            proc = re.match(key_equals, casStream)
            if not proc:
                raise Exception([{
                    'name':
                    'scanCAS',
                    'msg':
                    '... hmmm, did not see this one coming ...\n   around there :'
                    + casStream[:100]
                }])
            kw = proc.group('key').strip()
            if kw not in head: break  # move on to next line

            # ~~> translate the keyword (first occurrence only)
            head = head.replace(kw, '', 1)
            if kw.upper() in frgb['GB']:
                frline = frline.replace(kw, frgb['GB'][kw], 1)
            if kw.upper() in frgb['FR']:
                gbline = gbline.replace(kw, frgb['FR'][kw], 1)

            # ~~> look for less obvious keywords: consume this keyword's values
            casStream = proc.group('after')  # still hold the separator
            proc = re.match(val_equals, casStream)
            if not proc:
                raise Exception([{
                    'name': 'translateCAS',
                    'msg': 'no value to keyword: ' + kw
                }])
            while proc:
                casStream = proc.group('after')  # still hold the separator
                proc = re.match(val_equals, casStream)

        # final append
        if frline != '': frline = ' ' + frline
        frLines.append(frline + tail)
        if gbline != '': gbline = ' ' + gbline
        gbLines.append(gbline + tail)

    # ~~ print FR and GB versions of the CAS file
    putFileContent(cas + '.fr', frLines)
    putFileContent(cas + '.gb', gbLines)

    return cas + '.fr', cas + '.gb'
Example #17
0
def scanDICO(dicoFile):
    keylist = []
    dicoLines = getFileContent(dicoFile)
    # ~~ buddle continuations (long strings) and remove comments and empty lines
    core = []
    i = -1
    while i < len(dicoLines) - 1:
        i = i + 1
        line = ""
        l = dicoLines[i].strip()
        # proc = re.match(key_comment,l)
        # if proc: l = proc.group('before').strip() + ' '
        if l.strip()[0:1] == "/":
            continue
        proc = re.match(emptyline, l)
        if proc:
            continue
        proc = re.match(key_none, l)
        if proc:
            continue
        proc = re.match(entryquote, l)
        line = proc.group("before")
        l = proc.group("after")
        while l != "":
            if l[0:1] == '"':
                proc = re.match(exitdquote, l + " ")
                if proc:
                    line = line + "'" + proc.group("before").replace("'", '"') + "'"
                    proc = re.match(entryquote, proc.group("after").strip())
                    line = line + proc.group("before")
                    l = proc.group("after").strip()
                    print ">", l
                else:
                    i = i + 1
                    l = l.strip() + " " + dicoLines[i].strip()
            elif l[0:1] == "'":
                proc = re.match(exitsquote, l + " ")
                if proc:
                    line = line + "'" + proc.group("before").replace("'", '"') + "'"
                    proc = re.match(entryquote, proc.group("after").strip())
                    line = line + proc.group("before")
                    l = proc.group("after").strip()
                else:
                    i = i + 1
                    l = l.strip() + " " + dicoLines[i].strip()
        core.append(line)
    dicoStream = (" ".join(core)).replace("  ", " ").replace('""', '"')
    # ~~ clean values to keep only the keys
    while dicoStream != "":
        # ~~ non-key
        proc = re.match(key_none, dicoStream)
        if proc:
            dicoStream = proc.group("after")
            continue
        # ~~ key
        proc = re.match(key_equals, dicoStream)
        if not proc:
            break
        kw = proc.group("key").strip()
        if kw not in dicokeys:
            print "unknown key ", kw, proc.group("after"), dicoStream
            sys.exit(1)
        dicoStream = proc.group("after")  # still hold the separator
        # ~~ val
        proc = re.match(val_equals, dicoStream)
        if not proc:
            print "no value to keyword ", kw
            sys.exit(1)
        val = []
        while proc:
            if proc.group("val")[0] == "'":
                val.append(proc.group("val")[1 : len(proc.group("val")) - 1])
            else:
                val.append(proc.group("val"))
            dicoStream = proc.group("after")  # still hold the separator
            proc = re.match(val_equals, dicoStream)
        keylist.append([kw, val])
    # ~~ sort out the groups, starting with 'NOM'
    dico = {"FR": {}, "GB": {}, "DICO": dicoFile}
    keywords = {}
    while keylist != []:
        if keylist[0][0] != "NOM" and keylist[1][0] != "NOM1":
            print "could not read NOM or NOM1 from ", keylist[0][1]
            sys.exit(1)
        dico["FR"].update({keylist[0][1][0].replace('"', "'"): keylist[1][1][0].replace('"', "'")})
        dico["GB"].update({keylist[1][1][0].replace('"', "'"): keylist[0][1][0].replace('"', "'")})
        key = keylist[0][1][0].replace('"', "'")
        words = {}
        words = {"NOM": keylist[0][1]}
        keylist.pop(0)
        while keylist != []:
            if keylist[0][0] == "NOM":
                break
            words.update({keylist[0][0]: keylist[0][1]})
            keylist.pop(0)
        keywords.update({key: words})

    return dico, keywords
Example #18
0
   def parseContent(self,fileName):
      # TODO: Read the whole header

      # ~~ Get all ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
      core = getFileContent(fileName)

      # ~~ Parse head ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
      icore = 0; self.atrbut = {}; self.natrbut = 0; self.oatrbut = []
      while re.match(ken_header,core[icore].strip()):
         # ~~> instruction FileType
         proc = re.match(asc_FileType,core[icore].strip())
         if proc: self.fileType = proc.group('type').lower()
         # ~~> instruction AttributeName
         proc = re.match(asc_AttributeName,core[icore].strip())
         if proc:
            self.natrbut += 1
            if self.natrbut == int(proc.group('number')):
               self.oatrbut.append(proc.group('after').strip())
               self.atrbut.update({self.oatrbut[-1]:[]})
            else:
               print '... Could not read the order of your Attributes:',core[icore]
               sys.exit(1)
         # ... more instruction coming ...
         icore += 1
      self.head = core[0:icore]
      # /!\ icore starts the body

      # ~~ Parse body ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
      # This is also fairly fast, so you might not need a progress bar
      self.poly = []; self.vals = []; self.type = []; self.npoin = 0
      while icore < len(core):
         if core[icore].strip() == '':
            icore += 1
            continue
         # ~~> polygon head
         proc = re.match(var_1int,core[icore].strip())
         if not proc:
            print '\nCould not parse the following polyline header: '+core[icore].strip()
            sys.exit(1)
         nrec = int(proc.group('number'))
         a = proc.group('after').strip().split()
         if len(a) != self.natrbut:
            if self.natrbut != 0:
               print '... Could not find the correct number of attribute:',core[icore].strip(),', ',self.natrbut,' expected'
               sys.exit(1)
            else:
               self.natrbut = len(a)
               self.oatrbut = range(1,len(a)+1)
               self.atrbut = dict([ (i+1,[a[i]]) for i in range(len(a)) ])
         else:
            for i in range(len(self.oatrbut)): self.atrbut[self.oatrbut[i]].append(a[i])
         xyi = []; val = []; icore += 1
         for irec in range(nrec):
            nbres = core[icore+irec].strip().split()
            proc = re.match(var_1dbl,nbres[0])
            if not proc:
               proc = re.match(var_1int,nbres[0])
               if not proc:
                  print '\nCould not parse the following polyline record: '+core[icore+irec].strip()
                  sys.exit(1)
            nbres[0] = float(proc.group('number'))
            procd = re.match(var_1dbl,nbres[1])
            proci = re.match(var_1int,nbres[1])
            if procd: nbres[1] = float(procd.group('number'))
            elif proci: nbres[1] = float(procd.group('number'))
            xyi.append(nbres[:2])
            val.append(nbres[2:])
         if xyi != []:
            cls = 0
            if isClose(xyi[0],xyi[len(xyi)-1],size=10) :
               xyi.pop(len(xyi)-1)
               val.pop(len(val)-1)
               cls = 1
            self.poly.append(np.asarray(xyi,dtype=np.float))
            self.vals.append(np.asarray(val,dtype=np.float))
            self.type.append(cls)
         self.npoin += len(xyi)
         icore += nrec

      self.npoly = len(self.poly)
def scanDICO(dicoFile):
   """
   Parse a TELEMAC dictionary file into (dico, keywords):
    - dico: {'FR': {french name: english name},
             'GB': {english name: french name}, 'DICO': dicoFile}
    - keywords: {french name: {attribute: [values]}} for every keyword.
   Relies on the module-level regexes (entryquote, exitdquote, exitsquote,
   emptyline, key_none, key_equals, val_equals) and the dicokeys list;
   exits with an error message on a malformed dictionary.
   """
   keylist = []
   dicoLines = getFileContent(dicoFile)
   # ~~ buddle continuations (long strings) and remove comments and empty lines
   core = []; i = -1
   while i < len(dicoLines) - 1:
      i = i + 1; line = ''
      l = dicoLines[i].strip()
      #proc = re.match(key_comment,l)
      #if proc: l = proc.group('before').strip() + ' '
      # skip comment lines ('/'), empty lines and non-key noise
      if l.strip()[0:1] == '/' : continue
      proc = re.match(emptyline,l)
      if proc: continue
      proc = re.match(key_none,l)
      if proc: continue
      proc = re.match(entryquote,l)
      line = proc.group('before')
      l = proc.group('after')
      # re-assemble quoted strings, pulling in continuation lines while a
      # quote has not been closed on the current line
      while l != '':
         if l[0:1] == '"':
            proc = re.match(exitdquote,l+' ')
            if proc:
               # normalise: store double-quoted text as single-quoted
               line = line + "'" + proc.group('before').replace("'",'"') + "'"
               proc = re.match(entryquote,proc.group('after').strip())
               line = line + proc.group('before')
               l = proc.group('after').strip()
               print '>',l   # NOTE(review): looks like a leftover debug print
            else:
               # the quote closes on a later line: append the next one
               i = i + 1
               l = l.strip() + ' ' + dicoLines[i].strip()
         elif l[0:1] == "'":
            proc = re.match(exitsquote,l+' ')
            if proc:
               line = line + "'" + proc.group('before').replace("'",'"') + "'"
               proc = re.match(entryquote,proc.group('after').strip())
               line = line + proc.group('before')
               l = proc.group('after').strip()
            else:
               i = i + 1
               l = l.strip() + ' ' + dicoLines[i].strip()
      core.append(line)
   dicoStream = (' '.join(core)).replace('  ',' ').replace('""','"')
   # ~~ clean values to keep only the keys
   while dicoStream != '':
      # ~~ non-key
      proc = re.match(key_none,dicoStream)
      if proc:
         dicoStream = proc.group('after')
         continue
      # ~~ key
      proc = re.match(key_equals,dicoStream)
      if not proc: break
      kw = proc.group('key').strip()
      if kw not in dicokeys:
         print 'unknown key ',kw,proc.group('after'),dicoStream
         sys.exit(1)
      dicoStream = proc.group('after')   # still hold the separator
      # ~~ val
      proc = re.match(val_equals,dicoStream)
      if not proc:
         print 'no value to keyword ',kw
         sys.exit(1)
      val = []
      # accumulate every value following the key; quoted values are stripped
      while proc:
         if proc.group('val')[0] == "'":
            val.append(proc.group('val')[1:len(proc.group('val'))-1])
         else:
            val.append(proc.group('val'))
         dicoStream = proc.group('after')   # still hold the separator
         proc = re.match(val_equals,dicoStream)
      keylist.append([kw,val])
   # ~~ sort out the groups, starting with 'NOM'
   dico = {'FR':{},'GB':{},'DICO':dicoFile}; keywords = {}
   while keylist != []:
      # NOTE(review): 'and' only trips when BOTH entries are wrong — confirm
      # whether 'or' was intended; also assumes at least 2 entries remain
      if keylist[0][0] != 'NOM' and keylist[1][0] != 'NOM1':
         print 'could not read NOM or NOM1 from ',keylist[0][1]
         sys.exit(1)
      dico['FR'].update({keylist[0][1][0].replace('"',"'"):keylist[1][1][0].replace('"',"'")})
      dico['GB'].update({keylist[1][1][0].replace('"',"'"):keylist[0][1][0].replace('"',"'")})
      key = keylist[0][1][0].replace('"',"'")
      words = {}
      words = {'NOM':keylist[0][1]}
      keylist.pop(0)
      # absorb this keyword's attributes until the next 'NOM' group
      while keylist != []:
         if keylist[0][0] == 'NOM': break
         words.update({keylist[0][0]:keylist[0][1]})
         keylist.pop(0)
      keywords.update({key:words})

   return dico,keywords
Example #20
0
def getINSEL(file):
    # ~~ Get all ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    core = getFileContent(file)
    if not re.match(dat_footer, core[len(core) - 1]):
        print '\nCould not parse the following end line of the file: ' + core[
            len(core) - 1]
        sys.exit(1)

    # ~~ First scan at INSEL and DAMM ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    # This is also fairly fast, so you might not need a progress bar
    core.pop(len(core) - 1)
    poly = []
    vals = []
    typ = []
    npoin = 0
    iline = 0
    xyi = []
    val = []
    fileType = True
    while iline < len(core):
        proco = re.match(dat_openh, core[iline])
        if proco: t = 0
        procc = re.match(dat_closh, core[iline])
        if procc: t = 1
        if proco or procc:
            iline += 1
            if xyi != []:
                poly.append(xyi)
                npoin += len(xyi)
                typ.append(t)
                vals.append(val)
                xyi = []
                val = []
        else:
            proc = re.match(var_3dbl, core[iline].strip())
            if proc:
                xyi.append((proc.group('number1'), proc.group('number2')))
                val.append(proc.group('number3'))
            else:
                fileType = False
                proc = re.match(var_2dbl, core[iline].strip())
                if proc:
                    xyi.append((proc.group('number1'), proc.group('number2')))
                    val.append('')
                else:
                    print '\nCould not parse the following polyline record: ' + core[
                        iline]
                    sys.exit(1)
        iline += 1
    poly.append(xyi)
    npoin += len(xyi)
    typ.append(t)
    vals.append(val)

    # ~~ Second scan at INSEL and DAMM ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    # This is also fairly fast, so you might not need a progress bar
    if fileType:
        for pline in range(len(poly)):
            for iline in range(len(poly[pline])):
                a, b = poly[pline][iline]
                poly[pline][iline] = [float(a), float(b)]
                vals[pline][iline] = [float(c)]
            poly[pline] = np.asarray(poly[pline])
    else:
        for pline in range(len(poly)):
            for iline in range(len(poly[pline])):
                a, b = poly[pline][iline]
                poly[pline][iline] = [float(a), float(b)]
            poly[pline] = np.asarray(poly[pline])
            vals = []

    return fileType, npoin, poly, vals, typ
Example #21
0
    def parseContent(self, fileName):
        # TODO: Read the whole header

        # ~~ Get all ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
        core = getFileContent(fileName)

        # ~~ Parse head ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
        icore = 0
        self.atrbut = {}
        self.natrbut = 0
        self.oatrbut = []
        while re.match(ken_header, core[icore].strip()):
            # ~~> instruction FileType
            proc = re.match(asc_FileType, core[icore].strip())
            if proc: self.fileType = proc.group('type').lower()
            # ~~> instruction AttributeName
            proc = re.match(asc_AttributeName, core[icore].strip())
            if proc:
                self.natrbut += 1
                if self.natrbut == int(proc.group('number')):
                    self.oatrbut.append(proc.group('after').strip())
                    self.atrbut.update({self.oatrbut[-1]: []})
                else:
                    print '... Could not read the order of your Attributes:', core[
                        icore]
                    sys.exit(1)
            # ... more instruction coming ...
            icore += 1
        self.head = core[0:icore]
        # /!\ icore starts the body

        # ~~ Parse body ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
        # This is also fairly fast, so you might not need a progress bar
        self.poly = []
        self.vals = []
        self.type = []
        self.npoin = 0
        while icore < len(core):
            if core[icore].strip() == '':
                icore += 1
                continue
            # ~~> polygon head
            proc = re.match(var_1int, core[icore].strip())
            if not proc:
                print '\nCould not parse the following polyline header: ' + core[
                    icore].strip()
                sys.exit(1)
            nrec = int(proc.group('number'))
            a = proc.group('after').strip().split()
            if len(a) != self.natrbut:
                if self.natrbut != 0:
                    print '... Could not find the correct number of attribute:', core[
                        icore].strip(), ', ', self.natrbut, ' expected'
                    sys.exit(1)
                else:
                    self.natrbut = len(a)
                    self.oatrbut = range(1, len(a) + 1)
                    self.atrbut = dict([(i + 1, [a[i]])
                                        for i in range(len(a))])
            else:
                for i in range(len(self.oatrbut)):
                    self.atrbut[self.oatrbut[i]].append(a[i])
            xyi = []
            val = []
            icore += 1
            for irec in range(nrec):
                nbres = core[icore + irec].strip().split()
                proc = re.match(var_1dbl, nbres[0])
                if not proc:
                    proc = re.match(var_1int, nbres[0])
                    if not proc:
                        print '\nCould not parse the following polyline record: ' + core[
                            icore + irec].strip()
                        sys.exit(1)
                nbres[0] = float(proc.group('number'))
                procd = re.match(var_1dbl, nbres[1])
                proci = re.match(var_1int, nbres[1])
                if procd: nbres[1] = float(procd.group('number'))
                elif proci: nbres[1] = float(procd.group('number'))
                xyi.append(nbres[:2])
                val.append(nbres[2:])
            if xyi != []:
                cls = 0
                if isClose(xyi[0], xyi[len(xyi) - 1], size=10):
                    xyi.pop(len(xyi) - 1)
                    val.pop(len(val) - 1)
                    cls = 1
                self.poly.append(np.asarray(xyi, dtype=np.float))
                self.vals.append(np.asarray(val, dtype=np.float))
                self.type.append(cls)
            self.npoin += len(xyi)
            icore += nrec

        self.npoly = len(self.poly)