def test_writeCfg2Log(tmp_path):
    '''Simple test for module writeCfg2Log

    Initiates a logger in tmp_path, loads this test module's config (which
    writes the config to the log), then compares the produced log file to a
    reference file shipped in ./data — ignoring the first three lines, which
    contain run-specific (timestamped) header information.
    '''
    # reference data lives next to this test file
    dirname = os.path.dirname(__file__)
    avalancheDir = dirname
    logName = 'testCFG'
    logUtils.initiateLogger(tmp_path, logName)
    # loading the module config is what writes the cfg section to the log
    cfg = cfgUtils.getModuleConfig(test_logUtils)
    logFileName = os.path.join(tmp_path, 'testCFG.log')
    logFileNameRef = os.path.join(avalancheDir, 'data', 'testCFGRef.tog')

    # read produced log; context manager ensures the handle is closed
    with open(logFileName) as logFile:
        f = logFile.readlines()
    # drop the first three (run-specific header) lines before comparing
    del f[:3]

    with open(logFileNameRef) as refFile:
        fref = refFile.readlines()

    assert f == fref
def test_initiateLogger(capfd):
    '''Simple test for module initiateLogger

    Creates a logger next to this test file and checks that the expected
    log file appears on disk; the file is removed again afterwards.
    '''
    testDir = os.path.dirname(__file__)
    name = 'testCFG'
    log = logUtils.initiateLogger(testDir, name)
    logPath = os.path.join(testDir, 'testCFG.log')
    assert os.path.isfile(logPath)
    # clean up the log file created in the package directory
    os.remove(logPath)
# Run script for the original com1DFA dense-flow avalanche module.
from avaframe.in3Utils import fileHandlerUtils as fU
from avaframe.out1Peak import outPlotAllPeak as oP
from avaframe.in3Utils import initializeProject as initProj
from avaframe.in3Utils import cfgUtils
from avaframe.in3Utils import logUtils

# log file name; leave empty to use default runLog.log
logName = 'runCom1DFA'
modName = 'com1DFAOrigOrig'

# Load avalanche directory from general configuration file
cfgMain = cfgUtils.getGeneralConfig()
avalancheDir = cfgMain['MAIN']['avalancheDir']

# Start logging
log = logUtils.initiateLogger(avalancheDir, logName)
log.debug('MAIN SCRIPT')
log.debug('Current avalanche: %s', avalancheDir)

# Load input parameters from configuration file
# write config to log file
# NOTE(review): com1DFAOrig and time are not imported in this chunk —
# presumably imported earlier in the file; verify.
cfg = cfgUtils.getModuleConfig(com1DFAOrig)

startTime = time.time()

# Clean input directory(ies) of old work and output files
initProj.cleanSingleAvaDir(avalancheDir, keep=logName)

# Run Standalone DFA
reportDictList = com1DFAOrig.com1DFAOrigMain(cfg, avalancheDir)
# Script to fetch benchmark test results for a set of filtered benchmarks.
from avaframe.in3Utils import cfgUtils
from avaframe.in3Utils import logUtils
from benchmarks import simParametersDict

# log file name; leave empty to use default runLog.log
logName = 'fetchBenchmarkTest'

# Load settings from general configuration file
cfgMain = cfgUtils.getGeneralConfig()

# load all benchmark info as dictionaries from description files
# NOTE(review): tU and pathlib are expected to be imported earlier in the file
testDictList = tU.readAllBenchmarkDesDicts(info=False)

# filter benchmarks for a tag
# renamed from `type` to avoid shadowing the builtin
filterType = 'TAGS'
valuesList = ['pyVersion']
testList = tU.filterBenchmarks(testDictList, filterType, valuesList, condition='and')

for test in testList:
    # Start logging
    avaDir = test['AVADIR']
    log = logUtils.initiateLogger(avaDir, logName)

    # Fetch benchmark test results
    refFiles = tU.fetchBenchmarkResults(test['NAME'], resTypes=['ppr', 'pfd', 'pfv'])
    refDir = pathlib.Path('..', 'benchmarks', test['NAME'])
    benchDict = simParametersDict.fetchBenchParameters(test['NAME'])
    simNameRef = test['simNameRef']
avalancheDir = cfgAva['MAIN']['avalancheDir'] # set working directory workingDir = os.path.join(avalancheDir, 'Outputs', 'in1Data') fU.makeADir(workingDir) # Load input parameters from configuration file cfgMain = cfgUtils.getGeneralConfig() cfg = cfgUtils.getModuleConfig(cF) cfgGen = cfg['GENERAL'] # log file name; leave empty to use default runLog.log logName = 'runSampleFromDist%s' % cfgGen['distType'] # Start logging log = logUtils.initiateLogger(workingDir, logName) log.info('MAIN SCRIPT') # load parameters required to compute specific distribution a = float(cfgGen['a']) b = float(cfgGen['b']) c = float(cfgGen['c']) steps = int(cfgGen['support']) # compute the support of the distribution x = np.linspace(a, c, steps) # +++++++++++++++Compute desired distribution ++++++++++++ # Derive sample from Pert distribution # compute parameters alpha, beta, mu = cF.computeParameters(a, b, c)
def runCom1DFA(avaDir='', cfgFile='', relThField='', variationDict=''):
    """ run com1DFA module

    Sets up the configuration (optionally from a file override or an explicit
    variation dictionary), prepares one simulation per configuration variation,
    runs each through com1DFA.com1DFAMain, exports peak-field plots and a
    markdown report, and writes all simulation configurations to csv.

    Parameters
    ----------
    avaDir : str
        avalanche directory; if '' the one from the general config is used
    cfgFile : str
        optional path to a configuration file overriding the module defaults
    relThField : str
        release thickness field passed through to com1DFAMain
    variationDict : dict or ''
        if '' the variation dictionary is derived from the module config,
        otherwise the provided dict is validated against the default config

    Returns
    -------
    particlesList, fieldsList, Tsave, dem, plotDict, reportDictList
        NOTE(review): particlesList/fieldsList/Tsave/dem/plotDict stem from
        the LAST loop iteration only — confirm this is intended.
    """
    # +++++++++SETUP CONFIGURATION++++++++++++++++++++++++
    # log file name; leave empty to use default runLog.log
    logName = 'runCom1DFA'
    # Load avalanche directory from general configuration file
    cfgMain = cfgUtils.getGeneralConfig()
    if avaDir != '':
        avalancheDir = avaDir
    else:
        avalancheDir = cfgMain['MAIN']['avalancheDir']
    # set module name, required as long as we are in dev phase
    # - because need to create e.g. Output folder for com1DFA to distinguish from
    # current com1DFA
    modName = 'com1DFA'

    # Clean input directory(ies) of old work and output files
    # initProj.cleanSingleAvaDir(avalancheDir, keep=logName, deleteOutput=False)
    initProj.cleanModuleFiles(avalancheDir, com1DFA, modName)

    # Start logging
    log = logUtils.initiateLogger(avalancheDir, logName)
    log.info('MAIN SCRIPT')
    log.info('Current avalanche: %s', avalancheDir)

    # Create output and work directories
    # - because need to create e.g. Output folder for com1DFA to distinguish from
    workDir, outDir = inDirs.initialiseRunDirs(avalancheDir, modName)

    # generate list of simulations from desired configuration
    if variationDict == '':
        # Load full configuration
        modCfg, modInfo = cfgUtils.getModuleConfig(com1DFA, fileOverride=cfgFile, modInfo=True)
        variationDict = dP.getVariationDict(avalancheDir, modCfg, modInfo)
    else:
        # check if variationDict items exist and are provided in correct format
        # Load standard/ default configuration
        modCfg = cfgUtils.getDefaultModuleConfig(com1DFA)
        variationDict = dP.validateVarDict(variationDict, modCfg)
        log.info('Variations are performed for:')
        for key in variationDict:
            log.info('%s: %s' % (key, variationDict[key]))

    # add avalanche directory info to cfg
    modCfg['GENERAL']['avalancheDir'] = avalancheDir

    # fetch input data - dem, release-, entrainment- and resistance areas
    inputSimFiles = gI.getInputDataCom1DFAPy(avalancheDir, modCfg['FLAGS'])

    # write full configuration file to file
    cfgUtils.writeCfgFile(avalancheDir, com1DFA, modCfg, fileName='sourceConfiguration')

    # create a list of simulations
    # if need to reproduce exactely the hash - need to be strings with exactely the same number of digits!!
    simDict = com1DFA.prepareVarSimDict(modCfg, inputSimFiles, variationDict)
    log.info('The following simulations will be performed')
    for key in simDict:
        log.info('Simulation: %s' % key)

    reportDictList = []
    # loop over all simulations
    for cuSim in simDict:
        # load configuration dictionary for cuSim
        cfg = simDict[cuSim]['cfgSim']

        # save configuration settings for each simulation
        simHash = simDict[cuSim]['simHash']
        cfgUtils.writeCfgFile(avalancheDir, com1DFA, cfg, fileName=cuSim)

        # log simulation name
        log.info('Run simulation: %s' % cuSim)

        # set release area scenario
        inputSimFiles['releaseScenario'] = simDict[cuSim]['relFile']

        # +++++++++++++++++++++++++++++++++
        # ------------------------
        particlesList, fieldsList, Tsave, dem, reportDict, cfgFinal = com1DFA.com1DFAMain(
            cfg, avalancheDir, cuSim, inputSimFiles, outDir, relThField)

        # +++++++++EXPORT RESULTS AND PLOTS++++++++++++++++++++++++
        # Generate plots for all peakFiles
        plotDict = oP.plotAllPeakFields(avalancheDir, cfg, cfgMain['FLAGS'], modName)
        reportDictList.append(reportDict)

        # export for visualisation
        if cfg['VISUALISATION'].getboolean('writePartToCSV'):
            # NOTE(review): this rebinds outDir (from initialiseRunDirs) —
            # later iterations pass the new path to com1DFAMain; confirm intended.
            outDir = os.path.join(avalancheDir, 'Outputs', modName)
            com1DFA.savePartToCsv(cfg['VISUALISATION']['particleProperties'], particlesList, outDir)

        # create hash to check if config didnt change
        simHashFinal = cfgUtils.cfgHash(cfgFinal)
        if simHashFinal != simHash:
            log.warning('simulation configuration has been changed since start')
            cfgUtils.writeCfgFile(avalancheDir, com1DFA, cfg, fileName='%s_butModified' % simHash)

    # Set directory for report
    reportDir = os.path.join(avalancheDir, 'Outputs', 'com1DFA', 'reports')
    # write report
    gR.writeReport(reportDir, reportDictList, cfgMain['FLAGS'], plotDict)

    # read all simulation configuration files and return dataFrame and write to csv
    standardCfg = cfgUtils.getDefaultModuleConfig(com1DFA)
    simDF = cfgUtils.createConfigurationInfo(avalancheDir, standardCfg, writeCSV=True)

    return particlesList, fieldsList, Tsave, dem, plotDict, reportDictList
# Filter benchmarks, set up the variation-tests report file and logging.
# renamed from `type` to avoid shadowing the builtin
filterType = 'TAGS'
valuesList = ['varParTest']
testList = tU.filterBenchmarks(testDictList, filterType, valuesList, condition='and')

# Set directory for full standard test report
outDir = os.path.join(os.getcwd(), 'tests', 'reportsVariations')
fU.makeADir(outDir)

# Start writing markdown style report for standard tests
reportFile = os.path.join(outDir, 'variationTestsReport.md')
with open(reportFile, 'w') as pfile:
    # Write header
    pfile.write('# Variation Tests Report \n')
    pfile.write('## Compare com1DFA simulations to benchmark results \n')

# NOTE(review): logName is defined above this chunk — verify.
log = logUtils.initiateLogger(outDir, logName)
log.info('The following benchmark tests will be fetched ')
for test in testList:
    log.info('%s' % test['NAME'])

for test in testList:
    # Start logging
    avaDir = test['AVADIR']

    # Fetch benchmark test info
    benchDict = simParametersVar.fetchBenchParameters(test['NAME'])
    simNameRef = test['simNameRef']
    simNameRefTest = simNameRef.replace('ref', 'dfa')
    refDir = pathlib.Path('..', 'benchmarks', test['NAME'])
    simType = benchDict['simType']
# Local imports from avaframe.in3Utils import logUtils from avaframe.in3Utils import fileHandlerUtils as fU from avaframe.ana1Tests import testUtilities as tU # log file name; leave empty to use default runLog.log logName = 'runDescriptionDict' # set directory of benchmark test and if not yet done, this directory is created testName = 'avaMyTest' inDir = os.path.join('..', 'benchmarks', testName) fU.makeADir(inDir) # Start logging log = logUtils.initiateLogger(inDir, logName) # create empty description dictionary template desDict = tU.createDesDictTemplate() # fill this empty dictionary with test info desDict['TAGS'] = ['null'] # in which category does this test fall desDict['DESCRIPTION'] = " this is my null test" # what is this test about desDict['TYPE'] = ["2DPeak" ] # what type of benchmark data does the test provide desDict['FILES'] = ["mytest1.asc", "mytest2.asc"] # which files does the test provide desDict[ 'AVANAME'] = 'avaInclinedPlane' # which avalanache does the test refer to # write dictionary to json file
def test_initiateLogger(tmp_path):
    '''Simple test for module initiateLogger

    Initiating a logger in a temporary directory must create the
    corresponding log file on disk.
    '''
    name = 'testCFG'
    logUtils.initiateLogger(tmp_path, name)
    expected = os.path.join(tmp_path, name + '.log')
    assert os.path.isfile(expected)
# Load modules import os import shutil # Local imports from avaframe.in3Utils import initializeProject from avaframe.in3Utils import logUtils from avaframe.in3Utils import cfgUtils # log file name; leave empty to use default runLog.log logName = 'initializeProject' # Load avalanche directory from general configuration file cfgMain = cfgUtils.getGeneralConfig() avalancheDir = cfgMain['MAIN']['avalancheDir'] # Start logging log = logUtils.initiateLogger('.', logName) log.info('MAIN SCRIPT') log.info('Initializing Project: %s', avalancheDir) # Initialize project initializeProject.initializeFolderStruct(avalancheDir) logOrigin = os.path.join('.', logName + '.log') logDest = os.path.join(avalancheDir, logName + '.log') shutil.move(logOrigin, logDest)