Example #1
def mainDfa2Aimec(avaDir, cfgSetup):
    """ Exports the required data from com1DFA to be used by Aimec """

    # Create required directories
    workDir = os.path.join(avaDir, 'Work', 'ana3AIMEC', 'com1DFA')
    fU.makeADir(workDir)
    flowDepthDir = os.path.join(workDir, 'dfa_depth')
    fU.makeADir(flowDepthDir)
    pressureDir = os.path.join(workDir, 'dfa_pressure')
    fU.makeADir(pressureDir)
    speedDir = os.path.join(workDir, 'dfa_speed')
    fU.makeADir(speedDir)
    massDir = os.path.join(workDir, 'dfa_mass_balance')
    fU.makeADir(massDir)
    log.info('Aimec Work folders created to start postprocessing com1DFA data')

    # Setup input from com1DFA and export to Work ana3AIMEC
    suffix = {
        'type': ['pfd', 'ppr', 'pv'],
        'directory': ['dfa_depth', 'dfa_pressure', 'dfa_speed']
    }
    for suf, dirName in zip(suffix['type'], suffix['directory']):
        fU.getDFAData(avaDir, workDir, suf, dirName)

    # Write the paths to these files to a file
    writeAimecPathsFile(cfgSetup, avaDir)

    # Extract the MB info
    extractMBInfo(avaDir)
Example #2
def saveInitialParticleDistribution(avaDir, simName, dem):
    x = np.empty(0)
    y = np.empty(0)
    z = np.empty(0)
    m = np.empty(0)
    DEM = IOf.readRaster(dem)
    header = DEM['header']
    # Read log file
    fileName = os.path.join(os.getcwd(), avaDir, 'Outputs', 'com1DFAOrig', 'start%s.log' % (simName))
    with open(fileName, 'r') as file:
        for line in file:
            if "IPD" in line:
                ltime = line.split(', ')
                x = np.append(x, float(ltime[1]))
                y = np.append(y, float(ltime[2]))
                z = np.append(z, float(ltime[3]))
                m = np.append(m, float(ltime[4]))

    x = x - header.xllcenter
    y = y - header.yllcenter
    particles = {'t': 0.0, 'x': x, 'y': y, 'z': z, 'm': m}

    partDir = os.path.join(os.getcwd(), avaDir, 'Outputs', 'com1DFAOrig', 'particles', simName)
    fU.makeADir(partDir)
    savePartToPickle(particles, partDir)
Example #3
def initialiseRunDirs(avaDir, modName):
    """ Initialise Simulation run with input data

        Parameters
        ----------
        avaDir : str
            path to avalanche directory
        modName : str
            name of module

        Returns
        -------
        workDir : str
            path to Work directory
        outputDir : str
            path to Outputs directory
    """

    # Set directories outputs and current work
    outputDir = os.path.join(avaDir, 'Outputs', modName)
    fU.makeADir(outputDir)
    workDir = os.path.join(avaDir, 'Work', modName)
    # If Work directory already exists - error
    if os.path.isdir(workDir):
        log.error('Work directory %s already exists - delete first!' %
                  (workDir))
    else:
        os.makedirs(workDir)
        log.debug('Directory: %s created' % workDir)

    return workDir, outputDir
Example #4
def quickPlotSimple(avaDir, inputDir, cfg):
    """ Plot two raster datasets of identical dimension and difference between two datasets

        figure 1: plot raster data for dataset1, dataset2 and their difference
        figure 2: plot cross and longprofiles for both datasets (ny_loc and nx_loc define location of profiles)
        -plots are saved to Outputs/out3Plot

        Parameters
        ----------
        avaDir : str
            path to avalanche directory
        inputDir : str
            path to directory of input data (only 2 raster files allowed)
        cfg : dict
            configuration settings

    """

    outDir = os.path.join(avaDir, 'Outputs', 'out3Plot')
    fU.makeADir(outDir)

    # Get name of Avalanche
    avaName = os.path.basename(avaDir)

    # Load input datasets from input directory
    datafiles = glob.glob(inputDir + os.sep + '*.asc')
    datafiles.extend(glob.glob(inputDir + os.sep + '*.txt'))

    name1 = os.path.basename(datafiles[0])
    name2 = os.path.basename(datafiles[1])
    log.info('input dataset #1 is %s' % name1)
    log.info('input dataset #2 is %s' % name2)

    # Load data
    raster = IOf.readRaster(datafiles[0])
    rasterRef = IOf.readRaster(datafiles[1])
    data1, data2 = geoTrans.resizeData(raster, rasterRef)
    header = IOf.readASCheader(datafiles[0])
    cellSize = header.cellsize

    # Create dataDict to be passed to generatePlot
    dataDict = {
        'data1': data1,
        'data2': data2,
        'name1': name1,
        'name2': name2,
        'compareType': '',
        'cellSize': cellSize
    }

    # Initialise plotList
    plotDict = {'plots': [], 'difference': [], 'stats': []}

    # Create Plots
    plotList = generatePlot(dataDict, avaName, outDir, cfg, plotDict)
Example #5
def plotResults(x, h, u, dtStep, cfg):
    """ Create plots of the analytical solution for the given settings,
        including an animation
    """

    # index of time steps
    dtInd = int(100. * dtStep)
    # output directory
    avaDir = cfg['MAIN']['avalancheDir']
    outDir = os.path.join(avaDir, 'Outputs', 'ana1Tests')
    fU.makeADir(outDir)

    fig = _plotVariable(h, x, dtInd, dtStep, 'Flow depth [m]')
    fig.savefig(
        os.path.join(outDir, 'damBreakFlowDepth.%s' % (pU.outputFormat)))

    if cfg['FLAGS'].getboolean('showPlot'):
        plt.show()
    else:
        plt.close(fig)

    fig = _plotVariable(u, x, dtInd, dtStep, 'Flow velocity [ms-1]')
    fig.savefig(
        os.path.join(outDir, 'damBreakFlowVelocity.%s' % (pU.outputFormat)))
    if cfg['FLAGS'].getboolean('showPlot'):
        plt.show()
    else:
        plt.close(fig)

    if cfg['FLAGS'].getboolean('showPlot'):
        plt.show()

        # now start visualizing results using animation
        fig, ax = plt.subplots()

        def make_step(step):
            ax.clear()
            ax.plot(x, h[:, 0], 'k--', label='t_init')
            ax.plot(x, h[:, step], label='t=%d' % (step))
            plt.title('Animation of dry-bed test')
            ax.set_xlabel('x-coordinate [m]')
            ax.set_ylabel('Flow Depth [m]')
            plt.legend()

        anim = animation.FuncAnimation(fig,
                                       make_step,
                                       interval=0.1,
                                       frames=1000)
        plt.show()
Example #6
def initialiseRun(avaDir, flagEnt, flagRes, inputf='shp'):
    """ Initialise Simulation Run with input data """

    # Set directories for inputs, outputs and current work
    inputDir = os.path.join(avaDir, 'Inputs')
    outputDir = os.path.join(avaDir, 'Outputs', 'com1DFA')
    fU.makeADir(outputDir)
    workDir = os.path.join(avaDir, 'Work', 'com1DFA')
    fU.makeADir(workDir)

    # Initialise release areas, default is to look for shapefiles
    if inputf == 'nxyz':
        relFiles = glob.glob(inputDir+os.sep + 'REL'+os.sep + '*.nxyz')
    else:
        relFiles = glob.glob(inputDir+os.sep + 'REL'+os.sep + '*.shp')
        log.info('Release area files are: %s' % relFiles)

    # Initialise resistance areas
    if flagRes:
        resFiles = glob.glob(inputDir+os.sep + 'RES' + os.sep+'*.shp')
        if len(resFiles) < 1:
            log.warning('No resistance file')
            resFiles.append('')  # Kept this for future enhancements
    else:
        resFiles = []
        resFiles.append('')

    # Initialise entrainment areas
    if flagEnt:
        entFiles = glob.glob(inputDir+os.sep + 'ENT' + os.sep+'*.shp')
        if len(entFiles) < 1:
            log.warning('No entrainment file')
            entFiles.append('')  # Kept this for future enhancements
    else:
        entFiles = []
        entFiles.append('')

    # Initialise DEM
    demFile = glob.glob(inputDir+os.sep+'*.asc')

    # Initialise full experiment log file
    with open(os.path.join(workDir, 'ExpLog.txt'), 'w') as logFile:
        logFile.write("NoOfSimulation,SimulationRunName,Mu\n")

    # return DEM, first item of release, entrainment and resistance areas
    return demFile[0], relFiles, entFiles[0], resFiles[0]
Example #7
def quickPlotOne(inputDir, datafile, cfg, locVal, axis, resType=''):
    """ Plots one raster dataset and a cross profile

        figure 1: plot raster data for dataset and profile
        -plot is saved to Outputs/out3Plot

        Parameters
        ----------
        inputDir : str
            path to directory where the output folder out3Plot is created
        datafile : str
            path to data file
        cfg : dict
            configuration including flags for plotting
        locVal : float
            location of cross profile
        axis : str
            axis along which the cross profile is taken
        resType : str
            result parameter type e.g. 'pfd' - optional

    """

    outDir = os.path.join(inputDir, 'out3Plot')
    fU.makeADir(outDir)

    name1 = os.path.basename(datafile)
    log.info('input dataset #1 is %s' % name1)

    # Load data
    data1 = np.loadtxt(datafile, skiprows=6)
    header = IOf.readASCheader(datafile)
    cellSize = header.cellsize

    # Create dataDict to be passed to generatePlot
    dataDict = {'data1': data1, 'name1': name1, 'cellSize': cellSize}

    # Initialise plotList
    plotDict = {
        'plots': [],
        'location': locVal,
        'resType': resType,
        'axis': axis
    }

    # Create Plots
    plotList = generateOnePlot(dataDict, outDir, cfg, plotDict)
Example #8
from avaframe.in1Data import computeFromDistribution as cF
from avaframe.out3Plot import in1DataPlots as iPlot
from avaframe.in3Utils import fileHandlerUtils as fU
from avaframe.in3Utils import cfgUtils
from avaframe.in3Utils import logUtils

# log file name; leave empty to use default runLog.log
logName = 'runSampleFromDist'

# Load avalanche directory from general configuration file
cfgAva = cfgUtils.getGeneralConfig()
avalancheDir = cfgAva['MAIN']['avalancheDir']

# set working directory
workingDir = os.path.join(avalancheDir, 'Outputs', 'in1Data')
fU.makeADir(workingDir)

# Load input parameters from configuration file
cfgMain = cfgUtils.getGeneralConfig()
cfg = cfgUtils.getModuleConfig(cF)
cfgGen = cfg['GENERAL']

# log file name; leave empty to use default runLog.log
logName = 'runSampleFromDist%s' % cfgGen['distType']

# Start logging
log = logUtils.initiateLogger(workingDir, logName)
log.info('MAIN SCRIPT')

# load parameters required to compute specific distribution
a = float(cfgGen['a'])
Example #9
logName = 'runVariationsTests'

# Load settings from general configuration file
cfgMain = cfgUtils.getGeneralConfig()

# load all benchmark info as dictionaries from description files
testDictList = tU.readAllBenchmarkDesDicts(info=False)

# filter benchmarks for a tag
type = 'TAGS'
valuesList = ['varParTest']
testList = tU.filterBenchmarks(testDictList, type, valuesList, condition='and')

# Set directory for full standard test report
outDir = os.path.join(os.getcwd(), 'tests', 'reportsVariations')
fU.makeADir(outDir)

# Start writing markdown style report for standard tests
reportFile = os.path.join(outDir, 'variationTestsReport.md')
with open(reportFile, 'w') as pfile:
    # Write header
    pfile.write('# Variation Tests Report \n')
    pfile.write('## Compare com1DFA simulations to benchmark results \n')

log = logUtils.initiateLogger(outDir, logName)
log.info('The following benchmark tests will be fetched ')
for test in testList:
    log.info('%s' % test['NAME'])

for test in testList:
Example #10
from avaframe.in3Utils import cfgUtils
from avaframe.in3Utils import logUtils

# log file name; leave empty to use default runLog.log
logName = 'runComparisonModules'

# Load settings from general configuration file
cfgMain = cfgUtils.getGeneralConfig()

# load all benchmark info as dictionaries from description files
testList = ['avaFlatPlane']
simType = 'null'
simTypeString = '_' + simType + '_'
# Set directory for full standard test report
outDirReport = os.path.join(os.getcwd(), 'tests', 'reportscom1DFAOrigvsPy')
fU.makeADir(outDirReport)

# Start writing markdown style report for standard tests
reportFile = os.path.join(outDirReport, 'com1DFAOrigvsPy.md')
with open(reportFile, 'w') as pfile:

    # Write header
    pfile.write('# Standard Tests Report \n')
    pfile.write(
        '## Compare com1DFAOrig simulation to com1DFA simulation results \n')

# run Standard Tests sequentially
for avaName in testList:

    avaDir = 'data' + os.sep + avaName
Example #11
def plotComparison(dataComSol, hL, xR, hR, uR, dtAnalysis, cfgMain):
    """ Generate plots that compare the simulation results to the analytical solution

        Parameters
        -----------

        dataComSol: dict
            dictionary of simulation results (including name, file path, result type, etc.)
        hL: float
            initial release thickness
        xR: numpy array
            x extent of domain
        hR: numpy array
            flow depth of analytical solution
        uR: numpy array
            flow velocity of analytical solution
        dtAnalysis: float
            time step of analysis
        cfgMain: dict
            main configuration for AvaFrame

    """

    # load results
    for m in range(len(dataComSol['files'])):
        if dataComSol['resType'][m] == 'FD' and 't5.' in dataComSol['timeStep'][m]:
            data1FD = dataComSol['files'][m]
            name1FD = dataComSol['names'][m]
        elif dataComSol['resType'][m] == 'FV' and 't5.' in dataComSol['timeStep'][m]:
            data1V = dataComSol['files'][m]
            name1V = dataComSol['names'][m]
        elif dataComSol['resType'][m] == 'FD' and 't0.0' in dataComSol['timeStep'][m]:
            data2FD = dataComSol['files'][m]
        elif dataComSol['resType'][m] == 'FV' and 't0.0' in dataComSol['timeStep'][m]:
            data2V = dataComSol['files'][m]

    # Load data
    dataIniFD = np.loadtxt(data2FD, skiprows=6)
    dataAnaFD = np.loadtxt(data1FD, skiprows=6)
    dataIniV = np.loadtxt(data2V, skiprows=6)
    dataAnaV = np.loadtxt(data1V, skiprows=6)

    log.info('File for flow depth: %s' % name1FD)
    log.info('File for flow velocity: %s' % name1V)

    # Location of Profiles
    header = IOf.readASCheader(data1FD)
    cellSize = header.cellsize
    ny = dataAnaFD.shape[0]
    nx = dataAnaFD.shape[1]
    xllc = header.xllcenter
    nx_loc = int(ny * 0.5)

    # set x Vector
    x = np.arange(xllc, xllc + nx * cellSize, cellSize)
    y = np.zeros(len(x))
    y[x < 0] = hL
    y[x >= 0] = 0.0

    # setup index for time of analytical solution
    tR = int(dtAnalysis * 100.0)

    # setup output directory
    outDir = os.path.join(cfgMain['MAIN']['avalancheDir'], 'Outputs',
                          'ana1Tests')
    fU.makeADir(outDir)

    fig = _plotMultVariables(x, y, nx_loc, dtAnalysis, dataIniFD, dataAnaFD,
                             xR, hR, tR, 'Flow depth', 'm')
    fig.savefig(os.path.join(outDir,
                             'CompareDamBreakH.%s' % (pU.outputFormat)))

    y = np.zeros(len(x))
    fig = _plotMultVariables(x, y, nx_loc, dtAnalysis, dataIniV, dataAnaV, xR,
                             uR, tR, 'Flow velocity', 'm')
    fig.savefig(
        os.path.join(outDir, 'CompareDamBreakVel.%s' % (pU.outputFormat)))

    if cfgMain['FLAGS'].getboolean('showPlot'):
        plt.show()
    else:
        plt.close(fig)
Example #12
def plotAllFields(avaDir, inputDir, outDir, cfg):
    """ Plot all fields within given directory and save to outDir

        Parameters
        ----------
        avaDir : str
            path to avalanche directory
        inputDir : str
            path to input directory
        outDir : str
            path to directory where plots shall be saved to
        cfg : dict
            configuration settings

        """

    # Load all infos on simulations
    peakFiles = glob.glob(inputDir + os.sep + '*.asc')

    # create out dir if not already existing
    fU.makeADir(outDir)

    # Loop through peakFiles and generate plot
    for filename in peakFiles:

        # Load data
        raster = IOf.readRaster(filename)
        data = raster['rasterData']
        data = np.ma.masked_where(data == 0.0, data)
        name = os.path.splitext(os.path.basename(filename))[0]

        # get header info for file writing
        header = raster['header']
        cellSize = header.cellsize

        # Set extent of peak file
        ny = data.shape[0]
        nx = data.shape[1]
        Ly = ny * cellSize
        Lx = nx * cellSize
        unit = pU.cfgPlotUtils['unit%s' % cfg['GENERAL']['peakVar']]

        # Figure shows the result parameter data
        fig, ax = plt.subplots(figsize=(pU.figW, pU.figH))
        # choose colormap
        cmap, _, _, norm, ticks = makePalette.makeColorMap(
            pU.cmapPres, np.amin(data), np.amax(data), continuous=pU.contCmap)
        cmap.set_bad('w')
        im1 = ax.imshow(data,
                        cmap=cmap,
                        extent=[0, Lx, 0, Ly],
                        origin='lower',
                        aspect=nx / ny)
        pU.addColorBar(im1, ax, ticks, unit)

        title = str('%s' % name)
        ax.set_title(title)
        ax.set_xlabel('x [m]')
        ax.set_ylabel('y [m]')

        plotName = os.path.join(outDir, '%s.%s' % (name, pU.outputFormat))

        pU.putAvaNameOnPlot(ax, avaDir)

        fig.savefig(plotName)
        plt.close('all')
Example #13
# Start logging
log = logUtils.initiateLogger(avalancheDir, logName)
log.info('MAIN SCRIPT')
log.info('Current avalanche: %s', avalancheDir)

# Load configuration
FPCfg = os.path.join(avalancheDir, 'Inputs', 'FlatPlane_com1DFACfg.ini')
cfg = cfgUtils.getModuleConfig(com1DFA, FPCfg)
cfgGen = cfg['GENERAL']
cfgFP = cfg['FPSOL']

# for timing the sims
startTime = time.time()
# create output directory for test result plots
outDirTest = os.path.join(avalancheDir, 'Outputs', 'ana1Tests')
fU.makeADir(outDirTest)

# Define release thickness distribution
demFile, relFiles, entFiles, resFile, flagEntRes = gI.getInputData(
    avalancheDir, cfg['FLAGS'])
relDict = FPtest.getReleaseThickness(avalancheDir, cfg, demFile)
relTh = relDict['relTh']

# call com1DFA to perform simulation - provide configuration file and release thickness function
Particles, Fields, Tsave, dem, plotDict, reportDictList = runCom1DFA.runCom1DFA(
    avaDir=avalancheDir, cfgFile=FPCfg, relThField=relTh)
relDict['dem'] = dem
# +++++++++POSTPROCESS++++++++++++++++++++++++
# option for user interaction
if cfgFP.getboolean('flagInteraction'):
    showPlot = True
Example #14
def quickPlotBench(avaDir, simNameRef, simNameComp, refDir, compDir, cfg,
                   cfgPlot, suffix):
    """ Plot simulation result and compare to reference solution
        (two raster datasets of identical dimension) and save to
        Outputs/out3Plot within avalanche directory

        figure 1: plot raster data for dataset1, dataset2 and their difference,
                  including a histogram and the cumulative density function of the differences
        figure 2: plot cross and longprofiles for both datasets (ny_loc and nx_loc define location of profiles)
        -plots are saved to Outputs/out3Plot

        Parameters
        ----------
        avaDir : str
            path to avalanche directory
        simNameRef : str
            name of the reference simulation
        simNameComp : str
            name of the comparison simulation
        refDir : str
            path to directory of the reference data
        compDir : str
            path to directory of the comparison data
        cfg : dict
            global configuration settings
        cfgPlot : dict
            configuration settings for plots, required for flag if plots shall be shown or only saved
        suffix : str
            result parameter abbreviation (e.g. 'ppr')

        Returns
        -------
        plotList : list
            list of plot dictionaries (path to plots, min, mean and max difference
            between plotted datasets, max and mean value of reference dataset )

    """

    # Create required directories
    outDir = os.path.join(avaDir, 'Outputs', 'out3Plot')
    fU.makeADir(outDir)

    # Initialise plotDictList
    plotList = []

    # Initialise plotList
    plotDict = {'plots': [], 'difference': [], 'stats': []}

    simRefFile = os.path.join(refDir, simNameRef + '_' + suffix + '.asc')
    simCompFile = os.path.join(compDir, simNameComp + '_' + suffix + '.asc')

    if not os.path.isfile(simRefFile) or not os.path.isfile(simCompFile):
        log.error('File for result type: %s not found' % suffix)

    # Load data
    raster = IOf.readRaster(simCompFile)
    rasterRef = IOf.readRaster(simRefFile)
    data1, data2 = geoTrans.resizeData(raster, rasterRef)
    log.debug('dataset1: %s' % simCompFile)
    log.debug('dataset2: %s' % simRefFile)

    cellSize = rasterRef['header'].cellsize
    unit = pU.cfgPlotUtils['unit%s' % suffix]

    # Get name of Avalanche
    avaName = os.path.basename(avaDir)
    # Create dataDict to be passed to generatePlot
    dataDict = {
        'data1': data1,
        'data2': data2,
        'name1': simNameComp + '_' + suffix,
        'name2': simNameRef + '_' + suffix,
        'compareType': 'compToRef',
        'simName': simNameComp,
        'suffix': suffix,
        'cellSize': cellSize,
        'unit': unit
    }
    # Create Plots
    plotDictNew = generatePlot(dataDict, avaName, outDir, cfg, plotDict)

    return plotDictNew
Example #15
def quickPlot(avaDir,
              testDir,
              suffix,
              val,
              parameter,
              cfg,
              cfgPlot,
              rel='',
              simType='null',
              comModule='com1DFA',
              comModule2=''):
    """ Plot simulation result and compare to reference solution
        (two raster datasets of identical dimension) and save to
        Outputs/out3Plot within avalanche directory

        figure 1: plot raster data for dataset1, dataset2 and their difference,
                  including a histogram and the cumulative density function of the differences
        figure 2: plot cross and longprofiles for both datasets (ny_loc and nx_loc define location of profiles)
        -plots are saved to Outputs/out3Plot

        Parameters
        ----------
        avaDir : str
            path to avalanche directory
        suffix : str
            result parameter abbreviation (e.g. 'ppr')
        val : str
            value of parameter
        parameter : str
            parameter that is used to filter simulation results within folder,
            for example, simType, parameter variation, etc.
        cfg : dict
            global configuration settings
        cfgPlot : dict
            configuration settings for plots, required for flag if plots shall be shown or only saved
        rel : str
            optional - name of release area scenarios
        simType : str
            optional - simulation type null or entres

        Returns
        -------
        plotList : list
            list of plot dictionaries (path to plots, min, mean and max difference
            between plotted datasets, max and mean value of reference dataset )

    """

    # Create required directories
    workDir = os.path.join(avaDir, 'Work', 'out3Plot')
    fU.makeADir(workDir)
    outDir = os.path.join(avaDir, 'Outputs', 'out3Plot')
    fU.makeADir(outDir)

    # Initialise plotDictList
    plotList = []

    # Setup input from com1DFA
    fU.getDFAData(avaDir, workDir, suffix, comModule=comModule)
    if comModule2 == '':
        # Get data from reference run
        fU.getRefData(testDir, workDir, suffix)
    else:
        fU.getDFAData(avaDir, workDir, suffix, comModule=comModule2)

    # prepare data
    if parameter == 'Mu' or parameter == 'RelTh':
        data = fU.makeSimDict(workDir, parameter, avaDir)
    else:
        data = fU.makeSimDict(workDir, '', avaDir)

    cellSize = data['cellSize'][0]
    unit = pU.cfgPlotUtils['unit%s' % suffix]

    # check if release Area and simType area provided
    if rel != '':
        relAreas = [rel]
    else:
        # Count the number of release areas
        relAreas = set(data['releaseArea'])
    if parameter == 'simType':
        simType = val

    for rel in relAreas:

        # Initialise plotList
        plotDict = {'relArea': rel, 'plots': [], 'difference': [], 'stats': []}

        # get list of indices of files that are of correct simulation type and result parameter
        indSuffix = [-9999, -9999]
        findComp = True
        for m in range(len(data['files'])):
            if (data['resType'][m] == suffix and data['releaseArea'][m] == rel
                    and data[parameter][m] == val and data['simType'][m] == simType):
                if (data['modelType'][m] == 'dfa') and findComp:
                    indSuffix[0] = m
                    findComp = False
                elif data['modelType'][m] == cfgPlot['PLOT']['refModel']:
                    indSuffix[1] = m
        if findComp:
            log.error('No matching files found')

        # Load data
        raster = IOf.readRaster(data['files'][indSuffix[0]])
        rasterRef = IOf.readRaster(data['files'][indSuffix[1]])
        data1, data2 = geoTrans.resizeData(raster, rasterRef)
        log.debug('dataset1: %s' % data['files'][indSuffix[0]])
        log.debug('dataset2: %s' % data['files'][indSuffix[1]])

        # Get name of Avalanche
        avaName = data['avaName'][indSuffix[0]]

        # Create dataDict to be passed to generatePlot
        dataDict = {
            'data1': data1,
            'data2': data2,
            'name1': data['names'][indSuffix[0]],
            'name2': data['names'][indSuffix[1]],
            'compareType': 'compToRef',
            'simName': data['simName'][indSuffix[0]],
            'suffix': suffix,
            'cellSize': cellSize,
            'unit': unit
        }

        # Create Plots
        plotDictNew = generatePlot(dataDict, avaName, outDir, cfg, plotDict)
        plotList.append(plotDictNew)

    return plotList
Example #16
def plotAllPeakFields(avaDir, cfg, cfgFLAGS, modName):
    """ Plot all peak fields and return dictionary with paths to plots

        Parameters
        ----------
        avaDir : str
            path to avalanche directory
        cfg : dict
            configuration used to perform simulations
        cfgFLAGS : str
            general configuration, required to define if plots are saved to the reports directory
        modName : str
            name of module that has been used to produce data to be plotted

        Returns
        -------
        plotDict : dict
            dictionary with info on plots, like path to plot
        """

    # Load all infos on simulations
    inputDir = os.path.join(avaDir, 'Outputs', modName, 'peakFiles')
    peakFiles = fU.makeSimDict(inputDir, '', avaDir)

    demFile = gI.getDEMPath(avaDir)
    demData = IOf.readRaster(demFile)
    demField = demData['rasterData']

    # Output directory
    if cfgFLAGS.getboolean('ReportDir'):
        outDir = os.path.join(avaDir, 'Outputs', modName, 'reports')
        fU.makeADir(outDir)
    else:
        outDir = os.path.join(avaDir, 'Outputs', 'out1Peak')
        fU.makeADir(outDir)

    # Initialise plot dictionary with simulation names
    plotDict = {}
    for sName in peakFiles['simName']:
        plotDict[sName] = {}

    # Loop through peakFiles and generate plot
    for m in range(len(peakFiles['names'])):

        # Load names and paths of peakFiles
        name = peakFiles['names'][m]
        fileName = peakFiles['files'][m]
        avaName = peakFiles['avaName'][m]
        log.debug('now plot %s:' % (fileName))

        # Load data
        raster = IOf.readRaster(fileName)
        data = raster['rasterData']

        # constrain data to where there is data
        cellSize = peakFiles['cellSize'][m]
        rowsMin, rowsMax, colsMin, colsMax = pU.constrainPlotsToData(
            data, cellSize)
        dataConstrained = data[rowsMin:rowsMax + 1, colsMin:colsMax + 1]
        demConstrained = demField[rowsMin:rowsMax + 1, colsMin:colsMax + 1]

        data = np.ma.masked_where(dataConstrained == 0.0, dataConstrained)
        unit = pU.cfgPlotUtils['unit%s' % peakFiles['resType'][m]]

        # Set extent of peak file
        ny = data.shape[0]
        nx = data.shape[1]
        Ly = ny * cellSize
        Lx = nx * cellSize

        # Figure shows the result parameter data
        fig, ax = plt.subplots(figsize=(pU.figW, pU.figH))
        # choose colormap
        cmap, _, _, norm, ticks = makePalette.makeColorMap(
            pU.cmapPres, np.amin(data), np.amax(data), continuous=pU.contCmap)
        cmap.set_bad(alpha=0)
        rowsMinPlot = rowsMin * cellSize
        rowsMaxPlot = (rowsMax + 1) * cellSize
        colsMinPlot = colsMin * cellSize
        colsMaxPlot = (colsMax + 1) * cellSize
        im0 = ax.imshow(
            demConstrained,
            cmap='Greys',
            extent=[colsMinPlot, colsMaxPlot, rowsMinPlot, rowsMaxPlot],
            origin='lower',
            aspect='equal')
        im1 = ax.imshow(
            data,
            cmap=cmap,
            extent=[colsMinPlot, colsMaxPlot, rowsMinPlot, rowsMaxPlot],
            origin='lower',
            aspect='equal')
        pU.addColorBar(im1, ax, ticks, unit)

        title = str('%s' % name)
        ax.set_title(title)
        ax.set_xlabel('x [m]')
        ax.set_ylabel('y [m]')

        plotName = os.path.join(outDir, '%s.%s' % (name, pU.outputFormat))

        pU.putAvaNameOnPlot(ax, avaDir)

        fig.savefig(plotName)
        if cfgFLAGS.getboolean('showPlot'):
            plt.show()
        plotPath = os.path.join(os.getcwd(), plotName)
        plotDict[peakFiles['simName'][m]].update(
            {peakFiles['resType'][m]: plotPath})
        plt.close('all')

    return plotDict
Example #17
import os
import time
import glob

# Local imports
from avaframe.in3Utils import logUtils
from avaframe.in3Utils import fileHandlerUtils as fU
from avaframe.ana1Tests import testUtilities as tU

# log file name; leave empty to use default runLog.log
logName = 'runDescriptionDict'

# set directory of benchmark test; if it does not exist yet, it is created
testName = 'avaMyTest'
inDir = os.path.join('..', 'benchmarks', testName)
fU.makeADir(inDir)

# Start logging
log = logUtils.initiateLogger(inDir, logName)

# create empty description dictionary template
desDict = tU.createDesDictTemplate()

# fill this empty dictionary with test info
desDict['TAGS'] = ['null']  # in which category does this test fall
desDict['DESCRIPTION'] = " this is my null test"  # what is this test about
desDict['TYPE'] = ["2DPeak"]  # what type of benchmark data does the test provide
desDict['FILES'] = ["mytest1.asc", "mytest2.asc"]  # which files does the test provide
desDict[
Example #18
def probAnalysis(avaDir, cfg, cfgMain='', inputDir=''):
    """ Compute probability map of a given set of simulation results exceeding a particular threshold and save to outDir

        Parameters
        ----------
        avaDir: str
            path to avalanche directory
        cfg : dict
            configuration read from ini file of probAna function
        cfgMain : dict
            configuration read from ini file that has been used for the com1DFA simulation
        inputDir : str
            optional - path to directory where data that should be analysed can be found, required if not in com1DFA results
        outDir : str
            optional - path to directory where results shall be saved to
    """

    # Set input and output directory and Load all infos on simulations
    flagStandard = False
    if inputDir == '':
        inputDir = os.path.join(avaDir, 'Outputs', 'com1DFA', 'peakFiles')
        flagStandard = True
        peakFiles = fU.makeSimDict(inputDir, cfgMain['PARAMETERVAR']['varPar'], avaDir=avaDir)
    else:
        peakFiles = fU.makeSimDict(inputDir, avaDir=avaDir)

    outDir = os.path.join(avaDir, 'Outputs', 'ana4Stats')
    fU.makeADir(outDir)

    # get header info from peak files
    header = IOf.readASCheader(peakFiles['files'][0])
    cellSize = header.cellsize
    nRows = header.nrows
    nCols = header.ncols
    xllcenter = header.xllcenter
    yllcenter = header.yllcenter
    noDataValue = header.noDataValue

    # Initialise array for computations
    probSum = np.zeros((nRows, nCols))
    count = 0

    # Loop through peakFiles and compute probability
    for m in range(len(peakFiles['names'])):

        # Load peak field for desired peak field parameter
        # be aware of the standard simulation - i.e. if the default value should not be part of the analysis
        if peakFiles['resType'][m] == cfg['GENERAL']['peakVar']:
            log.info('Simulation parameter %s ' % ( cfg['GENERAL']['peakVar']))

            if flagStandard:
                if peakFiles[cfgMain['PARAMETERVAR']['varPar']][m] != cfgMain['DEFVALUES'][cfgMain['PARAMETERVAR']['varPar']]:
                    log.info('Simulation parameter %s= %s ' % (cfgMain['PARAMETERVAR']['varPar'], peakFiles[cfgMain['PARAMETERVAR']['varPar']][m]))

            # Load data
            fileName = peakFiles['files'][m]
            data = np.loadtxt(fileName, skiprows=6)
            dataLim = np.zeros((nRows, nCols))

            log.debug('File name is %s' % fileName)

            # Check if peak values exceed desired threshold
            dataLim[data > float(cfg['GENERAL']['peakLim'])] = 1.0
            probSum = probSum + dataLim
            count = count + 1

    # Create probability map ranging from 0-1
    probMap = probSum / count
    unit = pU.cfgPlotUtils['unit%s' % cfg['GENERAL']['peakVar']]
    log.info('probability analysis performed for peak parameter: %s and a peak value threshold of: %s %s' % (cfg['GENERAL']['peakVar'], cfg['GENERAL']['peakLim'], unit))
    log.info('%s peak fields added to analysis' % count)


    # Save to .asc file
    avaName = os.path.basename(avaDir)
    outFile = os.path.join(outDir, '%s_probMap%s.asc' % (avaName, cfg['GENERAL']['peakLim']))
    IOf.writeResultToAsc(header, probMap, outFile)