def vtu2hdf5(postProcessor, dataDir):
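    # needs: os, h5py, and the project-local os_dir / MDateTime helper modules (assumed imports)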
    outdirLocal = dataDir + '/postProcess_' + os.path.basename(
        os_dir.filename_without_ext(postProcessor.base.xml_recipe)) + '_hdf5'
    if os.path.exists(outdirLocal):
        os_dir.deleteDirForce(outdirLocal)
    os.mkdir(outdirLocal)
    f = 0  # index into the post-processor time axis
    print('-> Converting .vtu to .hdf5, MOHID formatted')
    for vtuFile in postProcessor.base.pvdReader.vtuFileHandlers:
        #convert the file
        r = vtuFile.getVtuVariableData('coords')
        hdf5FileName = os_dir.filename_without_ext(
            os.path.basename(vtuFile.fileName)) + '.hdf5'
        print('--> ' + os.path.basename(vtuFile.fileName) + ' -> ' +
              hdf5FileName)
        with h5py.File(outdirLocal + '/' + hdf5FileName, 'a') as hdf5File:
            #main groups
            grid = hdf5File.create_group("Grid")
            results = hdf5File.create_group("Results")
            time = hdf5File.create_group("Time")
            #subgroups
            group1 = results.create_group("Group_1/Data_1D")
            #writing data
            lon = group1.create_dataset('Longitude/Longitude_00001',
                                        data=r[:, 2],
                                        dtype='f')
            lon.attrs['Maximum'] = max(r[:, 2])
            lon.attrs['Minimum'] = min(r[:, 2])
            lon.attrs['Units'] = 'º'
            lat = group1.create_dataset('Latitude/Latitude_00001',
                                        data=r[:, 1],
                                        dtype='f')
            lat.attrs['Maximum'] = max(r[:, 1])
            lat.attrs['Minimum'] = min(r[:, 1])
            lat.attrs['Units'] = 'º'
            zz = group1.create_dataset('Z Pos/Z Position_00001',
                                       data=r[:, 0],
                                       dtype='f')
            zz.attrs['Maximum'] = max(r[:, 0])
            zz.attrs['Minimum'] = min(r[:, 0])
            zz.attrs['Units'] = 'm'
            r = vtuFile.getVtuVariableData('source')
            source = group1.create_dataset('Origin ID/Origin ID_00001',
                                           data=r,
                                           dtype='f')
            source.attrs['Maximum'] = max(r)
            source.attrs['Minimum'] = min(r)
            source.attrs['Units'] = '-'
            #writing time
            dateArray = MDateTime.getMOHIDDateFromTimeStamp(
                postProcessor.base.FileTimeHandler.timeAxis[f])
            date = time.create_dataset('Time_00001', data=dateArray, dtype='f')
            date.attrs['Maximum'] = max(dateArray)
            date.attrs['Minimum'] = min(dateArray)
            date.attrs['Units'] = 'YYYY/MM/DD HH:MM:SS'
            f = f + 1

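A quick way to check the Grid/Results/Time layout written above is to walk an output file with h5py; a minimal verification sketch, not part of the original module (the file path below is hypothetical):

import h5py

def print_hdf5_tree(path):
    # print every group and dataset in the file, with dataset shapes
    with h5py.File(path, 'r') as f:
        f.visititems(lambda name, obj: print(name, getattr(obj, 'shape', '')))

# print_hdf5_tree('postProcess_myRecipe_hdf5/frame_00001.hdf5')  # hypothetical path
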

#run('C:/Users/RBC_workhorse/Documents/GitHub/MOHID-Lagrangian/RUN_Cases/Arousa_2D_test_case/Arousa2D_case.xml','C:/Users/RBC_workhorse/Documents/GitHub/MOHID-Lagrangian/RUN_Cases/Arousa_2D_test_case/Post_scripts/PostRecipe_Arousa.xml','C:/Users/RBC_workhorse/Documents/GitHub/MOHID-Lagrangian/RUN_Cases/Arousa_2D_test_case/Arousa2D_case_out')
def main():
    lic = License()
    lic.print()

    # cmd line argument parsing
    argParser = argparse.ArgumentParser(
        description='Post processes MOHID Lagrangian outputs. Use -h for help.'
    )
    argParser.add_argument(
        "-i",
        "--input",
        dest="caseXML",
        help=".xml file with the case definition for the MOHID Lagrangian run",
        metavar=".xml")
    argParser.add_argument(
        "-f",
        "--force",
        dest="recipeXML",
        help=".xml file with the recipe for a post process run - optional",
        metavar=".xml")
    argParser.add_argument("-o",
                           "--outputDir",
                           dest="outDir",
                           help="output directory",
                           metavar="dir")
    argParser.add_argument("-po",
                           "--plot-only",
                           dest="plotonly",
                           help="output directory",
                           action='store_true')
    args = argParser.parse_args()

    caseXML = getattr(args, 'caseXML')
    recipeXML = []
    recipeXML.append(getattr(args, 'recipeXML'))
    outDir = getattr(args, 'outDir')

    print('-> Case definition file is', caseXML)
    print('-> Main output directory is', outDir)

    # get list of post-processing cycles to run
    if recipeXML == [None]:
        # open caseXML and extract recipeXML names
        recipeXML = getRecipeListFromCase(caseXML)

    for recipe in recipeXML:
        print('-> Running recipe', recipe)
        outDirLocal = outDir + '/postProcess_' + os.path.basename(
            os_dir.filename_without_ext(recipe)) + '/'

        # if the plot-only flag is set, run just the plotting stage
        if args.plotonly:
            plotResultsFromRecipe(outDirLocal, recipe)
            return

        postProcessor = PostProcessor(caseXML, recipe, outDir, outDirLocal)
        postProcessor.run()

        if checkPlotRecipe(recipe):
            plotResultsFromRecipe(outDirLocal, recipe)
Example #3
def csv2HDF5_Lag(fileName, datadir):
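    # needs: pandas as pd, numpy as np, re, h5py, and the project-local os_dir helper (assumed imports)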

    df = pd.read_csv(datadir + '/' + fileName,
                     skipinitialspace=True,
                     delimiter=";")
    header = list(df)
    Date = df.Fecha
    Lat = df.Lat
    Lon = df.Lon
    ZPos = np.zeros(len(Lat))
    Dates = []
    for date in Date:
        Dates.append(re.findall(r'\d+', date))
    Dates = (np.array(Dates, dtype=np.float32)).transpose()

    cleanFileName = os_dir.filename_without_ext(fileName)
    with h5py.File(datadir + '/' + cleanFileName + '.hdf5', "w") as f:
        Grid = f.create_group("Grid")
        Results = f.create_group("Results")
        Group_1 = Results.create_group("Group_1")
        Data_1D = Group_1.create_group("Data_1D")
        Latitude = Data_1D.create_group("Latitude")
        Longitude = Data_1D.create_group("Longitude")
        Z_Pos = Data_1D.create_group("Z Pos")
        Time = f.create_group("Time")

        counter = 1
        for lats in Lat:
            fieldName = 'Latitude_' + str(counter).zfill(5)
            dset = Latitude.create_dataset(fieldName, data=lats)
            counter = counter + 1

        counter = 1
        for lons in Lon:
            fieldName = 'Longitude_' + str(counter).zfill(5)
            dset = Longitude.create_dataset(fieldName, data=lons)
            counter = counter + 1

        counter = 1
        for zpos in ZPos:
            fieldName = 'Z Position_' + str(counter).zfill(5)
            dset = Z_Pos.create_dataset(fieldName, data=zpos)
            counter = counter + 1

        counter = 1
        for column in Dates.T:
            fieldName = 'Time_' + str(counter).zfill(5)
            dset = Time.create_dataset(fieldName, data=column)
            counter = counter + 1
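
Assuming a semicolon-separated CSV with the Fecha, Lat and Lon columns the function expects, a call like the following (file and directory names are hypothetical) writes the matching .hdf5 next to the source file:

# hypothetical usage; 'drifter_track.csv' must hold 'Fecha', 'Lat' and 'Lon' columns
csv2HDF5_Lag('drifter_track.csv', '/path/to/data')
# -> writes /path/to/data/drifter_track.hdf5 with the Grid/Results/Time layout above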
Example #4
    def openGlueWriter(self, fileNames, absSubDir, directory):
        self.hdf5FileName = fileNames
        self.glueDirectory = directory

        for hdf5File in self.hdf5FileName:
            if '_1' not in hdf5File:
                self.hdf5Reader = reader.MHDF5Reader(hdf5File, absSubDir)
                if self.hdf5Reader.isValidFile():
                    self.glueFileName.append(os_dir.filename_without_ext(hdf5File))
                    self.xdmfWriter.append(writer.MXDMFwriter())

                    self.xdmfWriter[-1].openFile(self.glueFileName[-1], self.glueDirectory)
                    self.hdf5FileType.append(self.hdf5Reader.getFileType())
                    self.usedTimes.append([])
Example #5
def main():
    lic = License()
    lic.print()

    #cmd line argument parsing
    argParser = argparse.ArgumentParser(
        description='Post processes MOHID Lagrangian outputs. Use -h for help.'
    )
    argParser.add_argument(
        "-i",
        "--input",
        dest="caseXML",
        help=".xml file with the case definition for the MOHID Lagrangian run",
        metavar=".xml")
    argParser.add_argument(
        "-f",
        "--force",
        dest="recipeXML",
        help=".xml file with the recipe for a post process run - optional",
        metavar=".xml")
    argParser.add_argument("-o",
                           "--outputDir",
                           dest="outDir",
                           help="output directory",
                           metavar="dir")
    args = argParser.parse_args()

    caseXML = getattr(args, 'caseXML')
    recipeXML = []
    recipeXML.append(getattr(args, 'recipeXML'))
    outDir = getattr(args, 'outDir')

    print('-> Case definition file is', caseXML)
    print('-> Main output directory is', outDir)

    #get list of post-processing cycles to run
    if recipeXML == [None]:
        #open caseXML and extract recipeXML names
        recipeXML = getRecipeListFromCase(caseXML)

    for recipe in recipeXML:
        print('-> Running recipe', recipe)
        outDirLocal = outDir + '/postProcess_' + os.path.basename(
            os_dir.filename_without_ext(recipe)) + '/'
        measures = getFieldsFromRecipe(recipe)
        postProcessorBase = runPostprocessing(caseXML, recipe, outDir,
                                              outDirLocal, measures)
        if checkHDF5WriteRecipe(recipe):
            vtu2hdf5(postProcessorBase, outDir)
Example #6
    def addFile(self, hdf5FileName, absSubDir, subdir, firstDate='', lastDate=''):
        self.currFileName = os_dir.filename_without_ext(hdf5FileName)
        self.currDir = absSubDir
        self.hdf5Reader = reader.MHDF5Reader(hdf5FileName, self.currDir)
        if self.hdf5Reader.isValidFile():
            f = self.glueFileName.index(self.currFileName)
            self.timeStep = 1
            corners3D = self.hdf5Reader.hasCorners3D()

            while self.timeStep <= self.hdf5Reader.getNumbTimeSteps():
                meshDims = self.hdf5Reader.getMeshDims(self.timeStep)
                dateStr = self.hdf5Reader.getDateStr(self.timeStep)
                timeStamp = mdate.getTimeStampFromDateString(dateStr)

                #checking for exceptions to add the file
                addStep = True
                if firstDate != '':
                    firstDateStamp = mdate.getTimeStampFromDateString(firstDate)
                    if timeStamp < firstDateStamp:
                        addStep = False
                if lastDate != '':
                    lastDateStamp = mdate.getTimeStampFromDateString(lastDate)
                    if timeStamp > lastDateStamp:
                        addStep = False
                if timeStamp in self.usedTimes[f]:
                    addStep = False

                if addStep:
                    attributes = self.hdf5Reader.getAllAttributesPath(self.timeStep)
                    self.xdmfWriter[f].openGrid('Solution_' + str(self.timeStep).zfill(5))
                    self.xdmfWriter[f].writeGeo(self.hdf5FileType[f], self.timeStep,
                                                timeStamp, dateStr, meshDims,
                                                self.hdf5Reader.getGeoDims(),
                                                corners3D, subdir + '/')
                    for attr in attributes:
                        self.xdmfWriter[f].writeAttribute(self.hdf5FileType[f], attr,
                                                          meshDims,
                                                          self.hdf5Reader.getGeoDims(),
                                                          corners3D, subdir + '/')
                    if self.hdf5Reader.getGeoDims() < 3:
                        if self.hdf5Reader.hasBathymetry():
                            self.xdmfWriter[f].writeAttribute(
                                self.hdf5FileType[f],
                                ['Bathymetry', '/Grid/Bathymetry'], meshDims,
                                self.hdf5Reader.getGeoDims(), corners3D, subdir + '/')
                    self.xdmfWriter[f].closeGrid()
                    self.usedTimes[f].append(timeStamp)

                self.timeStep = self.timeStep + 1

        print('- [MXDMFmaker::addFile]:', 'Indexed the', self.currFileName + '.hdf5', 'file')
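
openGlueWriter (Example #4) and addFile above are two stages of the same glue workflow; a hypothetical call sequence, assuming the owning object (the MXDMFmaker name is inferred from the log prefix) initialises glueFileName, xdmfWriter, hdf5FileType and usedTimes as empty lists:

# hypothetical call sequence; object, file and path names are placeholders,
# not the project's real API surface
glue = MXDMFmaker()
glue.openGlueWriter(['WaterProperties_2.hdf5'], '/abs/run/dir', '/out/dir')
glue.addFile('WaterProperties_2.hdf5', '/abs/run/dir', 'run_2')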
Example #7
    def doFile(self, hdf5FileName, directory):
        self.hdf5FileName = os_dir.filename_without_ext(hdf5FileName)
        self.directory = directory
        self.hdf5FileType = []
        self.timeStep = 1
        #instantiating reader
        self.hdf5Reader = reader.MHDF5Reader(hdf5FileName, self.directory)
        
        #if file is valid, we create a xmdf writer object and feed it
        if self.hdf5Reader.isValidFile():
            self.xdmfWriter = writer.MXDMFwriter()
            self.xdmfWriter.openFile(self.hdf5FileName, self.directory)
            self.hdf5FileType = self.hdf5Reader.getFileType()

            print('- [MXDMFmaker::doFile]:', self.hdf5FileType, 'file')
            
            while self.timeStep <= self.hdf5Reader.getNumbTimeSteps():
                meshDims = self.hdf5Reader.getMeshDims(self.timeStep)
                dateStr = self.hdf5Reader.getDateStr(self.timeStep)
                timeStamp = mdate.getTimeStampFromDateString(dateStr)
                attributes = self.hdf5Reader.getAllAttributesPath(self.timeStep)
                corners3D = self.hdf5Reader.hasCorners3D()
                
                self.xdmfWriter.openGrid('Solution_' + str(self.timeStep).zfill(5))
                self.xdmfWriter.writeGeo(self.hdf5FileType, self.timeStep, timeStamp,
                                         dateStr, meshDims, self.hdf5Reader.getGeoDims(),
                                         corners3D, self.hdf5Reader.LatLon)
                for attr in attributes:
                    self.xdmfWriter.writeAttribute(self.hdf5FileType, attr, meshDims,
                                                   self.hdf5Reader.getGeoDims(), corners3D)
                if self.hdf5Reader.getGeoDims() < 3:
                    if self.hdf5Reader.hasBathymetry():
                        self.xdmfWriter.writeAttribute(self.hdf5FileType,
                                                       ['Bathymetry', '/Grid/Bathymetry'],
                                                       meshDims,
                                                       self.hdf5Reader.getGeoDims(),
                                                       corners3D)
                self.xdmfWriter.closeGrid()
                
                self.timeStep = self.timeStep + 1
        
            self.xdmfWriter.closeFile()
            print('- [MXDMFmaker::doFile]:', 'Wrote',self.hdf5FileName+'.xdmf', 'file')
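
A hypothetical one-file driver for doFile, with the MXDMFmaker class name again inferred from the log prefix:

# hypothetical usage; writes Hydrodynamic_1.xdmf next to the .hdf5 if the file is valid
maker = MXDMFmaker()
maker.doFile('Hydrodynamic_1.hdf5', '/path/to/hdf5/files')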
Example #8
def run():
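    # needs: argparse, glob, datetime, xml.etree.ElementTree as ET, plus the
    # project-local os_dir, xmlWriter, ncMetaParser and about modules (assumed imports)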

    lic = about.Licence()
    lic.print()

    #cmd line argument parsing---------------------------
    argParser = argparse.ArgumentParser(
        description=
        'Indexes input files for MOHID Lagrangian to parse. Use -h for help.')
    argParser.add_argument(
        "-i",
        "--input",
        dest="caseXML",
        help=".xml file with the case definition for the MOHID Lagrangian run",
        metavar=".xml")
    argParser.add_argument("-o",
                           "--outputDir",
                           dest="outDir",
                           help="output directory",
                           metavar=".xml")
    args = argParser.parse_args()

    caseXML = getattr(args, 'caseXML')
    outDir = getattr(args, 'outDir')
    print('-> Case definition file is', caseXML)
    #---------------------------------------------------
    #parsing case definition file
    root = ET.parse(caseXML).getroot()

    dataDir = []
    dataType = []
    for type_tag in root.findall('caseDefinitions/inputData/inputDataDir'):
        dataDir.append(type_tag.get('name'))
        dataType.append(type_tag.get('type'))

    for type_tag in root.findall('execution/parameters/parameter'):
        if type_tag.get('key') == 'Start':
            StartTime = datetime.strptime(type_tag.get('value'),
                                          "%Y %m %d %H %M %S")
        if type_tag.get('key') == 'End':
            EndTime = datetime.strptime(type_tag.get('value'),
                                        "%Y %m %d %H %M %S")

    #------------------------------------------------------
    if len(dataDir) > 1:
        print('-> Input data directories are', dataDir)
    else:
        print('-> Input data directory is', dataDir)

    #------------------------------------------------------
    fileExtensions = ['.nc', '.nc4']

    #going through each input directory and indexing its files
    inputFiles = []
    for idir in dataDir:
        for ext in fileExtensions:
            inputFiles.append(glob.glob(idir + '/**/*' + ext, recursive=True))
    #cleaning list of empty values
    inputFiles = list(filter(None, inputFiles))

    if not inputFiles:

        print('No input files found. Supported files are ', fileExtensions)

    else:

        indexerFileName = os_dir.filename_without_ext(caseXML) + '_inputs'
        indexer = xmlWriter.xmlWriter(indexerFileName)

        #going through every file, extracting some metadata and writing it to the indexer file
        ncMeta = []
        for idir in inputFiles:
            for ifile in idir:
                print('--> reading file', ifile)
                ncMeta.append(ncMetaParser.ncMetadata(ifile, StartTime))

        ncMeta.sort(key=lambda x: x.startTime)

        indexer.openCurrentsCollection()

        print('--> indexing currents data')
        for ncfile in ncMeta:
            indexer.writeFile(
                ncfile.getName(), ncfile.getstartTime(), ncfile.getendTime(),
                ncfile.getstartDate().strftime("%Y %m %d %H %M %S"),
                ncfile.getendDate().strftime("%Y %m %d %H %M %S"))

        indexer.closeCurrentsCollection()
        indexer.closeFile()
Example #9
def run():
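    # needs: argparse, glob, datetime, xml.etree.ElementTree as ET, plus the
    # project-local os_dir, xmlWriter, ncMetaParser and about modules (assumed imports)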

    lic = about.Licence()
    lic.print()

    #cmd line argument parsing---------------------------
    argParser = argparse.ArgumentParser(
        description=
        'Indexes input files for MOHID Lagrangian to parse. Use -h for help.')
    argParser.add_argument(
        "-i",
        "--input",
        dest="caseXML",
        help=".xml file with the case definition for the MOHID Lagrangian run",
        metavar=".xml")
    argParser.add_argument("-o",
                           "--outputDir",
                           dest="outDir",
                           help="output directory",
                           metavar="dir")
    args = argParser.parse_args()

    caseXML = getattr(args, 'caseXML')
    outDir = getattr(args, 'outDir')
    print('-> Case definition file is', caseXML)
    #---------------------------------------------------
    #parsing case definition file
    root = ET.parse(caseXML).getroot()

    dataList = []
    for type_tag in root.findall('caseDefinitions/inputData/inputDataDir'):
        dataList.append((type_tag.get('name'), type_tag.get('type')))

    for type_tag in root.findall('execution/parameters/parameter'):
        if type_tag.get('key') == 'Start':
            StartTime = datetime.strptime(type_tag.get('value'),
                                          "%Y %m %d %H %M %S")
        if type_tag.get('key') == 'End':
            EndTime = datetime.strptime(type_tag.get('value'),
                                        "%Y %m %d %H %M %S")

    #------------------------------------------------------
    if len(dataList) > 1:
        print('-> Input data directories are', [row[0] for row in dataList])
    else:
        print('-> Input data directory is', [row[0] for row in dataList])

    #------------------------------------------------------
    fileExtensions = ['.nc', '.nc4']

    #going through each input directory and indexing its files
    inputFileCurrents = []
    inputFileWinds = []
    inputFileWaves = []
    inputFileWaterProps = []
    for idir in dataList:
        for ext in fileExtensions:
            if idir[1] == 'hydrodynamic':
                inputFileCurrents.append(
                    glob.glob(idir[0] + '/**/*' + ext, recursive=True))
            if idir[1] == 'waves':
                inputFileWaves.append(
                    glob.glob(idir[0] + '/**/*' + ext, recursive=True))
            if idir[1] == 'meteorology':
                inputFileWinds.append(
                    glob.glob(idir[0] + '/**/*' + ext, recursive=True))
            if idir[1] == 'waterProperties':
                inputFileWaterProps.append(
                    glob.glob(idir[0] + '/**/*' + ext, recursive=True))
    #cleaning list of empty values
    inputFileCurrents = list(filter(None, inputFileCurrents))
    inputFileWinds = list(filter(None, inputFileWinds))
    inputFileWaves = list(filter(None, inputFileWaves))
    inputFileWaterProps = list(filter(None, inputFileWaterProps))

    nInputs = len(inputFileCurrents) + len(inputFileWinds) + len(
        inputFileWaves) + len(inputFileWaterProps)
    if nInputs == 0:
        print('No input files found. Supported files are ', fileExtensions)
    else:
        indexerFileName = os_dir.filename_without_ext(caseXML) + '_inputs'
        indexer = xmlWriter.xmlWriter(indexerFileName)
        inputFile = [
            inputFileCurrents, inputFileWaves, inputFileWinds,
            inputFileWaterProps
        ]
        inputType = ['hydrodynamic', 'waves', 'meteorology', 'waterProperties']

        inputType = [
            inputType[i] for i in range(0, len(inputType))
            if inputFile[i] != []
        ]
        #going through every file of each type, extracting some metadata and writing it to the indexer file
        i = 0
        for inputList in inputFile:
            if len(inputList) > 0:
                ncMeta = []
                for idir in inputList:
                    for ifile in idir:
                        print('--> reading file', ifile)
                        ncMeta.append(ncMetaParser.ncMetadata(
                            ifile, StartTime))
                ncMeta.sort(key=lambda x: x.startTime)
                # Going through all the files to check the time axis integrity.
                ncMetaParser.ncDimParser.checkTime(ncMeta)
                indexer.openCollection(inputType[i])
                print('--> indexing', inputType[i], 'data')
                for ncfile in ncMeta:
                    indexer.writeFile(
                        ncfile.getName(), ncfile.getstartTime(),
                        ncfile.getendTime(),
                        ncfile.getstartDate().strftime("%Y %m %d %H %M %S"),
                        ncfile.getendDate().strftime("%Y %m %d %H %M %S"))
                indexer.closeCollection(inputType[i])
                i = i + 1

        indexer.closeFile()
        print('-> All done, wrote', indexerFileName + '.xml', 'indexing file')
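
The four parallel per-type lists above could also be collected in one pass with a dictionary keyed on the recipe's type attribute; a sketch of that alternative, assuming the same (name, type) tuples as dataList:

# alternative sketch: one dict instead of four parallel lists (same semantics
# as the per-type glob loops above, assuming the same dataList tuples)
from collections import defaultdict
import glob

def collectInputFiles(dataList, fileExtensions=('.nc', '.nc4')):
    filesByType = defaultdict(list)
    for name, dataType in dataList:
        for ext in fileExtensions:
            found = glob.glob(name + '/**/*' + ext, recursive=True)
            if found:  # mirrors the filter(None, ...) cleanup
                filesByType[dataType].append(found)
    return filesByType

# usage: collectInputFiles(dataList)['hydrodynamic'] corresponds to inputFileCurrents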