def __init__(self, lang, extension):
    super().__init__(lang, extension)
    self.valid_systems = ['daint:gpu', 'dom:gpu', 'kesch:cn', 'tiger:gpu',
                          'arolla:cn', 'tsa:cn']
    self.num_gpus_per_node = 1
    self.num_tasks_per_node = 1
    self.system_modules = {
        'arolla': ['cuda/10.1.243'],
        'daint': ['craype-accel-nvidia60'],
        'dom': ['craype-accel-nvidia60'],
        'kesch': ['cudatoolkit/8.0.61'],
        'tiger': ['craype-accel-nvidia60'],
        'tsa': ['cuda/10.1.243']
    }
    sysname = self.current_system.name
    self.modules += self.system_modules.get(sysname, [])

    # as long as cuda/9 is not the default, we will need:
    if sysname in {'daint', 'kesch'}:
        self.variables = {'ALLINEA_FORCE_CUDA_VERSION': '8.0'}
    elif sysname in {'arolla', 'tsa'}:
        self.variables = {'ALLINEA_FORCE_CUDA_VERSION': '10.1'}

    self.ddt_options = [
        '--offline --output=ddtreport.txt ',
        '--break-at _jacobi-cuda-kernel.cu:59 --evaluate *residue_d ',
        '--trace-at _jacobi-cuda-kernel.cu:111,residue'
    ]
    self.build_system.cppflags = ['-DUSE_MPI', '-D_CSCS_ITMAX=5']
    if self.current_system.name == 'kesch':
        arch = 'sm_37'
        self.build_system.ldflags = ['-lm', '-lcudart']
    elif self.current_system.name in ['arolla', 'tsa']:
        arch = 'sm_70'
        self.build_system.ldflags = ['-lstdc++', '-lm',
                                     '-L$EBROOTCUDA/lib64', '-lcudart']
    else:
        arch = 'sm_60'
        self.build_system.ldflags = ['-lstdc++']

    self.build_system.options = ['NVCCFLAGS="-g -arch=%s"' % arch]
    self.sanity_patterns = sn.all([
        sn.assert_found('MPI implementation', 'ddtreport.txt'),
        sn.assert_found('Evaluate', 'ddtreport.txt'),
        sn.assert_found(r'\*residue_d:', 'ddtreport.txt'),
        sn.assert_found(r'Debugging\s*:\s*srun\s+%s' % self.executable,
                        'ddtreport.txt'),
        sn.assert_lt(sn.abs(sn.extractsingle(
            r'^tracepoint\s+.*\s+residue:\s+(?P<result>\S+)',
            'ddtreport.txt', 'result', float) - 0.25), 1e-5),
        sn.assert_found(r'Every process in your program has terminated\.',
                        'ddtreport.txt')
    ])
def __init__(self, scale, variant):
    super().__init__()
    self.descr = f'QuantumESPRESSO GPU check (version: {scale}, {variant})'
    self.valid_systems = ['daint:gpu']
    # FIXME: Unify modules after daint upgrade
    if self.current_system.name == 'daint':
        self.modules = ['QuantumESPRESSO/6.5a1-CrayPGI-19.10-cuda-10.1']
    else:
        self.modules = ['QuantumESPRESSO']

    self.num_gpus_per_node = 1
    if scale == 'small':
        self.valid_systems += ['dom:gpu']
        self.num_tasks = 6
        energy_reference = -11427.09017176
    else:
        self.num_tasks = 16
        energy_reference = -11427.09017179

    self.num_tasks_per_node = 1
    self.num_cpus_per_task = 12

    energy = sn.extractsingle(r'!\s+total energy\s+=\s+(?P<energy>\S+) Ry',
                              self.stdout, 'energy', float)
    energy_diff = sn.abs(energy - energy_reference)
    self.sanity_patterns = sn.all([
        self.sanity_patterns,
        sn.assert_lt(energy_diff, 1e-8)
    ])

    references = {
        'maint': {
            'small': {
                'dom:gpu': {'time': (61.0, None, 0.05, 's')},
                'daint:gpu': {'time': (61.0, None, 0.05, 's')}
            },
            'large': {
                'daint:gpu': {'time': (54.0, None, 0.05, 's')}
            }
        },
        'prod': {
            'small': {
                'dom:gpu': {'time': (61.0, None, 0.05, 's')},
                'daint:gpu': {'time': (61.0, None, 0.05, 's')}
            },
            'large': {
                'daint:gpu': {'time': (54.0, None, 0.05, 's')}
            }
        }
    }
    self.reference = references[variant][scale]
    self.tags |= {'maintenance' if variant == 'maint' else 'production'}
def __init__(self):
    super().__init__()
    self.descr = 'OpenFOAM check of reconstructPar: heatexchanger tutorial'
    self.executable_opts = ['-latestTime', '-region air']
    self.readonly_files = [
        'processor0', 'processor1', 'processor2', 'processor3'
    ]
    self.sanity_patterns = sn.all([
        sn.assert_found('Time = 2000', self.stdout),
        sn.assert_found(r'^\s*[Ee]nd', self.stdout)
    ])
def __init__(self):
    super().__init__()
    self.descr = 'verifies reading info of a compressed netCDF-4 file'
    self.executable = 'ncks'
    self.executable_opts = [
        '-M', 'test_echam_spectral-deflated_wind10_wl_ws.nc4c'
    ]
    self.sanity_patterns = sn.all([
        sn.assert_not_found(r'(?i)unsupported|error', self.stderr),
        sn.assert_found(r'physics.*Modified ECMWF physics', self.stdout)
    ])
def __init__(self):
    super().__init__()
    self.descr = 'OpenFOAM check of foamyHexMesh: motorbike tutorial'
    self.executable_opts = ['-parallel']
    self.num_tasks = 8
    self.num_tasks_per_node = 8
    self.sanity_patterns = sn.all([
        sn.assert_found('Time = 100\n', self.stdout),
        sn.assert_found('Finalising parallel run', self.stdout),
        sn.assert_found(r'^\s*[Ee]nd', self.stdout)
    ])
def __init__(self):
    super().__init__()
    self.descr = 'OpenFOAM check of pimpleFoam: tjunction tutorial'
    residual = sn.extractall(
        r'Solving for epsilon, \w+\s\w+\s=\s\d.\d+.\s'
        r'Final residual\s=\s(?P<res>-?\S+),',
        self.stdout, 'res', float)
    self.sanity_patterns = sn.all(
        sn.chain(
            sn.map(lambda x: sn.assert_lt(x, 5.e-05), residual),
            [sn.assert_found(r'^\s*[Ee]nd', self.stdout)],
        ))
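# --- A minimal standalone sketch (not one of the checks above) of the
# deferred-expression idiom used in the pimpleFoam check: sn.extractall,
# sn.map and sn.chain only build a lazy pipeline; the file is read when
# sn.evaluate() runs. The file name 'pimple.log' and its contents are
# hypothetical stand-ins for real pimpleFoam output.
import reframe.utility.sanity as sn

with open('pimple.log', 'w') as fp:
    fp.write('Solving for epsilon, Initial residual = 0.001, '
             'Final residual = 1.2e-06,\n')
    fp.write('End\n')

residual = sn.extractall(r'Final residual\s=\s(?P<res>-?\S+),',
                         'pimple.log', 'res', float)
ok = sn.all(sn.chain(
    sn.map(lambda x: sn.assert_lt(x, 5.e-05), residual),
    [sn.assert_found(r'^\s*[Ee]nd', 'pimple.log')],
))
print(sn.evaluate(ok))  # True; a failing assert raises SanityError instead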
def __init__(self):
    super().__init__()
    self.descr = 'OpenFOAM check of checkMesh: flange tutorial'
    self.executable_opts = ['-latestTime', '-allTopology', '-allGeometry',
                            '-parallel']
    self.num_tasks = 8
    self.num_tasks_per_node = 8
    self.sanity_patterns = sn.all([
        sn.assert_found('Finalising parallel run', self.stdout),
        sn.assert_found(r'^\s*[Ee]nd', self.stdout)
    ])
def assert_successful_execution(self):
    '''Check that the program ran on two different nodes (the extracted
    nids differ), that IPCMagic is configured correctly and that the
    program printed its end-of-run message (the slope parameter).'''
    nids = sn.extractall(r'nid(?P<nid>\d+)', self.stdout, 'nid', str)
    return sn.all([
        sn.assert_eq(sn.len(nids), 2),
        sn.assert_ne(nids[0], nids[1]),
        sn.assert_found(r'slope=\S+', self.stdout)
    ])
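# --- Standalone sketch of the two-node assertion above; 'job.out' and its
# contents are hypothetical. Indexing a deferred expression (nids[0]) is
# itself deferred, so the whole check is evaluated only in sn.evaluate().
import reframe.utility.sanity as sn

with open('job.out', 'w') as fp:
    fp.write('nid00001\nnid00002\nslope=0.98\n')

nids = sn.extractall(r'nid(?P<nid>\d+)', 'job.out', 'nid', str)
check = sn.all([
    sn.assert_eq(sn.len(nids), 2),   # exactly two ranks reported
    sn.assert_ne(nids[0], nids[1]),  # ...on two different nodes
    sn.assert_found(r'slope=\S+', 'job.out')
])
print(sn.evaluate(check))  # True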
def __init__(self):
    self.valid_prog_environs = ['PrgEnv-gnu']
    self.valid_systems = [
        'daint:gpu', 'dom:gpu', 'kesch:cn', 'tiger:gpu', 'arolla:cn',
        'tsa:cn'
    ]
    self.num_gpus_per_node = 1
    self.num_tasks_per_node = 1
    self.sourcesdir = 'src/Cuda'
    self.executable = 'cuda-gdb'
    self.executable_opts = ['-x .in.cudagdb ./cuda_gdb_check']
    # unload xalt to avoid a runtime error:
    self.pre_run = ['unset LD_PRELOAD']
    if self.current_system.name == 'kesch':
        self.exclusive_access = True
        self.modules = ['cudatoolkit/8.0.61']
        nvidia_sm = '37'
    elif self.current_system.name in ['arolla', 'tsa']:
        self.exclusive_access = True
        self.modules = ['cuda/10.1.243']
        nvidia_sm = '70'
    else:
        self.modules = ['craype-accel-nvidia60']
        nvidia_sm = '60'

    self.build_system = 'Make'
    self.build_system.makefile = 'Makefile_cuda_gdb'
    self.build_system.cflags = [
        '-g', '-D_CSCS_ITMAX=1', '-DUSE_MPI', '-fopenmp'
    ]
    self.build_system.cxxflags = ['-g', '-G', '-arch=sm_%s' % nvidia_sm]
    self.build_system.ldflags = ['-g', '-fopenmp', '-lstdc++']
    if self.current_system.name == 'kesch':
        self.build_system.ldflags = [
            '-g', '-fopenmp', '-lcublas', '-lcudart', '-lm'
        ]
    elif self.current_system.name in ['arolla', 'tsa']:
        self.build_system.ldflags += [
            '-L$EBROOTCUDA/lib64', '-lcudart', '-lm'
        ]

    self.sanity_patterns = sn.all([
        sn.assert_found(r'^Breakpoint 1 at .*: file ', self.stdout),
        sn.assert_found(r'_jacobi-cuda-kernel.cu, line 59\.', self.stdout),
        sn.assert_found(r'^\(cuda-gdb\) quit', self.stdout),
        sn.assert_lt(
            sn.abs(
                sn.extractsingle(r'\$1\s+=\s+(?P<result>\S+)',
                                 self.stdout, 'result', float)), 1e-5)
    ])
    self.maintainers = ['MKr', 'JG']
    self.tags = {'production', 'craype'}
def __init__(self):
    super().__init__()
    self.descr = ('OpenFOAM-Extend check of reconstructPar: '
                  'multiRegionHeater test')
    self.executable_opts = ['-parallel']
    self.num_tasks = 4
    self.num_tasks_per_node = 4
    result = sn.extractall(r'\sglobal\s=\s(?P<res>-?\S+),',
                           self.stdout, 'res', float)[-5:]
    self.sanity_patterns = sn.all(
        sn.map(lambda x: sn.assert_lt(abs(x), 1.e-04), result))
def __init__(self, lang):
    super().__init__()
    self.name = 'scorep_mpi_omp_%s' % lang.replace('+', 'p')
    self.descr = 'SCORE-P %s check' % lang
    self.valid_systems = ['daint:gpu', 'daint:mc', 'dom:gpu', 'dom:mc']
    # Score-P fails with the latest clang-based cce and pgi compilers:
    # src/measurement/thread/fork_join/scorep_thread_fork_join_omp.c:402:
    # Fatal: Bug 'TPD == 0': Invalid OpenMP thread specific data object.
    # -> removing cce from the supported compilers for now.
    self.valid_prog_environs = ['PrgEnv-gnu', 'PrgEnv-intel']
    self.prgenv_flags = {
        'PrgEnv-gnu': ['-g', '-fopenmp'],
        'PrgEnv-intel': ['-g', '-openmp'],
    }
    self.sourcesdir = os.path.join('src', lang)
    self.executable = 'jacobi'
    self.build_system = 'Make'
    self.build_system.makefile = 'Makefile_scorep_mpi_omp'
    # NOTE: Restrict concurrency to allow creation of Fortran modules
    if lang == 'F90':
        self.build_system.max_concurrency = 1

    self.num_tasks = 3
    self.num_tasks_per_node = 3
    self.num_cpus_per_task = 4
    self.num_iterations = 200
    self.variables = {
        'OMP_NUM_THREADS': str(self.num_cpus_per_task),
        'ITERATIONS': str(self.num_iterations),
        'SCOREP_ENABLE_PROFILING': 'false',
        'SCOREP_ENABLE_TRACING': 'true',
        'OMP_PROC_BIND': 'true',
        'SCOREP_TIMER': 'clock_gettime'
    }
    cpu_count = self.num_cpus_per_task * self.num_tasks_per_node
    self.otf2_file = 'otf2.txt'
    self.sanity_patterns = sn.all([
        sn.assert_found('SUCCESS', self.stdout),
        sn.assert_eq(
            sn.count(
                sn.extractall(r'(?P<line>LEAVE.*omp\s+\S+\s+\@_jacobi)',
                              self.otf2_file, 'line')),
            4 * self.num_iterations * cpu_count),
        sn.assert_not_found('warning|WARNING', self.stderr)
    ])
    self.maintainers = ['MK', 'JG']
    self.tags = {'production'}
    # additional program call in order to generate the tracing output
    # for the sanity check
    self.post_run = [
        'otf2-print scorep-*/traces.otf2 > %s' % self.otf2_file
    ]
def set_sanity(self):
    # {{{ 0/ MPICH version:
    # MPI VERSION : CRAY MPICH version 7.7.15 (ANL base 3.2)
    # MPI VERSION : CRAY MPICH version 8.0.16.17 (ANL base 3.3)
    regex = r'^MPI VERSION\s+: CRAY MPICH version \S+ \(ANL base (\S+)\)'
    rpt_file = os.path.join(self.stagedir, self.rpt)
    mpich_version = sn.extractsingle(regex, rpt_file, 1)
    reference_files = {
        '3.2': {
            'control': 'mpit_control_vars_32.ref',
            'categories': 'mpit_categories_32.ref',
        },
        '3.3': {
            'control': 'mpit_control_vars_33.ref',
            'categories': 'mpit_categories_33.ref',
        }
    }
    # }}}
    # {{{ 1/ MPI Control Variables: MPIR_...
    # --- extract reference data:
    regex = r'^(?P<vars>MPIR\S+)$'
    ref_file = os.path.join(
        self.stagedir,
        reference_files[sn.evaluate(mpich_version)]['control']
    )
    self.ref_control_vars = sorted(sn.extractall(regex, ref_file, 'vars'))
    # --- extract runtime data:
    regex = r'^\t(?P<vars>MPIR\S+)\t'
    self.run_control_vars = sorted(sn.extractall(regex, rpt_file, 'vars'))
    # --- debug with: grep -P '\tMPIR+\S*\t' rpt |awk '{print $1}' |sort
    # }}}
    # {{{ 2/ MPI Category:
    # --- extract reference data:
    regex = r'^(?P<category>.*)$'
    ref = os.path.join(
        self.stagedir,
        reference_files[sn.evaluate(mpich_version)]['categories']
    )
    ref_cat_vars = sorted(sn.extractall(regex, ref, 'category'))
    self.ref_cat_vars = list(filter(None, ref_cat_vars))
    # --- extract runtime data:
    regex = (r'^(?P<category>Category \w+ has \d+ control variables, \d+'
             r' performance variables, \d+ subcategories)')
    rpt = os.path.join(self.stagedir, self.rpt)
    self.run_cat_vars = sorted(sn.extractall(regex, rpt, 'category'))
    # }}}
    # {{{ 3/ The extracted lists can be compared (when sorted):
    self.sanity_patterns = sn.all([
        sn.assert_eq(self.ref_control_vars, self.run_control_vars,
                     msg='sanity1 "mpit_control_vars.ref" failed'),
        sn.assert_eq(self.ref_cat_vars, self.run_cat_vars,
                     msg='sanity2 "mpit_categories.ref" failed'),
    ])
    # }}}
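# --- Standalone sketch of the extract-and-compare pattern used above;
# 'ref.rpt' and 'run.rpt' plus their contents are hypothetical stand-ins
# for the reference and runtime MPI_T reports. Note that iterating a
# deferred expression (here implicitly, via sorted()) evaluates it.
import reframe.utility.sanity as sn

with open('ref.rpt', 'w') as fp:
    fp.write('MPIR_CVAR_B\nMPIR_CVAR_A\n')

with open('run.rpt', 'w') as fp:
    fp.write('\tMPIR_CVAR_A\tvalue\n\tMPIR_CVAR_B\tvalue\n')

ref_vars = sorted(sn.extractall(r'^(?P<vars>MPIR\S+)$', 'ref.rpt', 'vars'))
run_vars = sorted(sn.extractall(r'^\t(?P<vars>MPIR\S+)\t', 'run.rpt',
                                'vars'))
print(sn.evaluate(sn.assert_eq(ref_vars, run_vars,
                               msg='control variables differ')))  # True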
def __init__(self):
    super().__init__()
    self.descr = 'verifies that the NCO supports the nc4 filetype'
    self.sourcesdir = None
    self.executable = 'ncks'
    self.executable_opts = ['-r']
    self.sanity_patterns = sn.all([
        sn.assert_found(r'^netCDF4/HDF5 (support|available)\s+Yes\W',
                        self.stdout),
        sn.assert_found(r'^netCDF4/HDF5 (support|enabled)\s+Yes\W',
                        self.stdout)
    ])
def validate_passed(self):
    return sn.all([
        sn.assert_not_found(
            r'invalid because the ratio', self.outfile_lazy,
            msg='number of processes assigned could not be factorized'
        ),
        sn.assert_eq(
            4, sn.count(sn.findall(r'PASSED', self.outfile_lazy))
        ),
        sn.assert_eq(0, self.num_tasks_assigned % self.num_tasks_per_node)
    ])
def __init__(self):
    self.dep_name = (
        f'spack_config_check_{util.toalphanum(self.spack_version)}'
    )
    self.depends_on(self.dep_name, how=udeps.by_env)
    self.sanity_patterns = sn.all([
        sn.assert_not_found(r'ERROR', self.stderr),
        sn.assert_not_found(r'Error', self.stderr),
        sn.assert_not_found(r'missing', self.stderr),
        sn.assert_not_found(r'command not found', self.stderr),
    ])
    self.executable_opts += [self.spack_pkg]
    self.postrun_cmds = [f'spack install {self.spack_pkg}']
def set_sanity_patterns(self):
    valid_test_ids = {i for i in range(11) if i not in {6, 9}}
    assert_finished_tests = [
        sn.assert_eq(
            sn.count(sn.findall('Test%s finished' % test_id, self.stdout)),
            self.job.num_tasks)
        for test_id in valid_test_ids
    ]
    self.sanity_patterns = sn.all([
        *assert_finished_tests,
        sn.assert_not_found('(?i)ERROR', self.stdout),
        sn.assert_not_found('(?i)ERROR', self.stderr)
    ])
def assert_count_gpus(self):
    return sn.all([
        sn.assert_eq(
            sn.count(sn.findall(r'\[\S+\] Found \d+ gpu\(s\)',
                                self.stdout)),
            self.num_tasks_assigned),
        sn.assert_eq(
            sn.count(
                sn.findall(r'\[\S+\] \[gpu \d+\] Kernel launch '
                           r'latency: \S+ us', self.stdout)),
            self.num_tasks_assigned * self.num_gpus_per_node)
    ])
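# --- Standalone sketch of the count-based assertion used above;
# 'gpu.out' and its per-rank lines are hypothetical. sn.count consumes
# the deferred iterable returned by sn.findall.
import reframe.utility.sanity as sn

with open('gpu.out', 'w') as fp:
    fp.write('[nid00001] Found 1 gpu(s)\n')
    fp.write('[nid00002] Found 1 gpu(s)\n')

num_tasks = 2  # stands in for self.num_tasks_assigned
print(sn.evaluate(sn.assert_eq(
    sn.count(sn.findall(r'\[\S+\] Found \d+ gpu\(s\)', 'gpu.out')),
    num_tasks)))  # True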
def __init__(self, **kwargs):
    super().__init__(
        'buoyantBoussinesqSimpleFoam',
        'OpenFOAM-Extend check buoyantBoussinesqSimpleFoam: hotRoom test',
        **kwargs)
    self.executable = 'buoyantBoussinesqSimpleFoam'
    result = sn.extractall(r'\sglobal\s=\s(?P<res>\S+),',
                           self.stdout, 'res', float)
    self.sanity_patterns = sn.all(
        sn.map(lambda x: sn.assert_lt(abs(x), 1.e-17), result))
def __init__(self):
    # {{{ pe
    self.descr = 'Tool validation'
    self.valid_prog_environs = [
        'PrgEnv-gnu', 'PrgEnv-intel', 'PrgEnv-pgi', 'PrgEnv-cray',
        'PrgEnv-aocc', 'cpeAMD', 'cpeCray', 'cpeGNU', 'cpeIntel'
    ]
    # NOTE: dom (slurm/20.11.4) is failing
    self.valid_systems = [
        'dom:mc', 'dom:gpu', 'daint:mc', 'daint:gpu', 'eiger:mc',
        'pilatus:mc'
    ]
    self.tool = 'gdb4hpc'
    self.modules = [self.tool]
    self.maintainers = ['JG']
    self.tags = {'sph', 'hpctools', 'cpu', 'craype'}
    # }}}
    # {{{ compile
    self.testname = 'sedov'
    self.sourcepath = f'{self.testname}.cpp'
    # TODO: self.prgenv_flags = -O0
    self.executable = self.tool
    self.target_executable = './mpi+omp'
    # self.sourcepath = f'{self.testname}.cpp'
    # self.target_executable = f'./{self.testname}.exe'
    # self.postbuild_cmds = [f'mv {self.tool} {self.target_executable}']
    # }}}
    # {{{ run
    self.time_limit = '10m'
    self.version_rpt = 'version.rpt'
    self.which_rpt = 'which.rpt'
    self.gdb_slm = './gdb4hpc.slm'
    self.gdb_in = './gdb4hpc.in'
    self.gdb_rpt = './gdb4hpc.rpt'
    self.executable_opts = [f'-b {self.gdb_in} #']
    self.prerun_cmds = [
        # f'srun --version >> {self.version_rpt}',
        f'{self.tool} --version >> {self.version_rpt}',
        f'which {self.tool} &> {self.which_rpt}',
        'echo starttime=`date +%s`',
        # this is needed because:
        '# Everything between the "#cray_debug_xxx" lines will be ignored',
        '#cray_debug_start',
    ]
    # }}}
    # {{{ sanity
    self.sanity_patterns = sn.all([
        # check the job output:
        sn.assert_found(r'Shutting down debugger and killing application',
                        self.stdout),
    ])
    # }}}
def __init__(self, **kwargs):
    super().__init__('info_nc4c', **kwargs)
    self.descr = 'verifies reading info of a compressed netCDF-4 file'
    self.executable = 'ncks'
    self.executable_opts = [
        '-M', 'test_echam_spectral-deflated_wind10_wl_ws.nc4c'
    ]
    self.sanity_patterns = sn.all([
        sn.assert_not_found(r'(?i)unsupported|error', self.stderr),
        sn.assert_found(r'^Global attribute \d+: CDO, size = 63 NC_CHAR',
                        self.stdout)
    ])
def __init__(self):
    super().__init__()
    self.descr = 'verifies reading info of a standard netCDF file'
    self.executable = 'cdo'
    self.executable_opts = ['info', 'sresa1b_ncar_ccsm3-example.nc']
    # TODO: Also check stderr for 'Warning'? That currently fails...
    self.sanity_patterns = sn.all([
        sn.assert_not_found(r'(?i)unsupported|error', self.stderr),
        sn.assert_found(
            r'info: Processed( 688128 values from)? '
            r'5 variables over 1 timestep', self.stderr)
    ])
def __init__(self):
    super().__init__()
    self.descr = 'OpenFOAM-Extend check of simpleFoam: motorbike tutorial'
    self.executable_opts = ['-parallel']
    self.num_tasks = 6
    self.num_tasks_per_node = 6
    result = sn.extractall(
        r'time step continuity errors : '
        r'\S+\s\S+ = \S+\sglobal = (?P<res>-?\S+),',
        self.stdout, 'res', float)
    self.sanity_patterns = sn.all(
        sn.map(lambda x: sn.assert_lt(abs(x), 5.e-04), result))
def __init__(self):
    self.descr = 'Check for standard Cray variables'
    self.valid_prog_environs = ['builtin']
    self.executable = 'module'
    self.executable_opts = ['show', self.cray_module]
    envvar_prefix = self.cray_module.upper().replace('-', '_')
    self.sanity_patterns = sn.all([
        sn.assert_found(f'{envvar_prefix}_PREFIX', self.stderr),
        sn.assert_found(f'{envvar_prefix}_VERSION', self.stderr)
    ])
    self.tags = {'production', 'craype'}
    self.maintainers = ['EK', 'TM']
def __init__(self, **kwargs):
    super().__init__(
        'buoyantBoussinesqSimpleFoam',
        'OpenFOAM check of buoyantBoussinesqSimpleFoam: hotroom tutorial',
        **kwargs)
    self.executable = 'buoyantBoussinesqSimpleFoam'
    residual = sn.extractall(r'\sglobal\s=\s(?P<res>\S+),',
                             self.stdout, 'res', float)
    self.sanity_patterns = sn.all(
        sn.chain(sn.map(lambda x: sn.assert_lt(x, 1.e-17), residual),
                 [sn.assert_found(r'^\s*[Ee]nd', self.stdout)]))
def __init__(self, **kwargs):
    super().__init__(
        'interMixingFoam',
        'OpenFOAM check of interMixingFoam: dambreak tutorial',
        **kwargs)
    self.sanity_patterns = sn.all([
        sn.assert_eq(
            sn.count(
                sn.findall('(?P<line>Air phase volume fraction)',
                           self.stdout)),
            2534),
        sn.assert_found(r'^\s*[Ee]nd', self.stdout)
    ])
def __init__(self, prg_envs):
    super().__init__()
    self.valid_systems = ['daint:gpu', 'dom:gpu']
    self.valid_prog_environs = prg_envs
    self.modules = ['craype-accel-nvidia60']
    self.configs = {
        'PrgEnv-gnu': 'cscs-gnu',
        'PrgEnv-cray': 'cscs-cray',
        'PrgEnv-pgi': 'cscs-pgi',
    }
    app_source = os.path.join(self.current_system.resourcesdir,
                              'SPEC_ACCELv1.2')
    self.prebuild_cmd = ['cp -r %s/* .' % app_source,
                         './install.sh -d . -f']
    # We only need prebuild_cmd here, but a build system with no build
    # action at all is not supported, so we compile a harmless dummy
    # source file.
    self.build_system = 'SingleSource'
    self.sourcepath = './benchspec/ACCEL/353.clvrleaf/src/timer_c.c'
    self.build_system.cflags = ['-c']
    self.refs = {
        env: {bench_name: (rt, None, 0.1, 'Seconds')
              for (bench_name, rt) in zip(self.benchmarks[env],
                                          self.exec_times[env])}
        for env in self.valid_prog_environs
    }
    self.num_tasks = 1
    self.num_tasks_per_node = 1
    self.time_limit = (0, 30, 0)
    self.executable = 'runspec'

    outfile = sn.getitem(sn.glob('result/ACCEL.*.log'), 0)
    self.sanity_patterns_ = {
        env: sn.all([sn.assert_found(r'Success.*%s' % bn, outfile)
                     for bn in self.benchmarks[env]])
        for env in self.valid_prog_environs
    }
    self.perf_patterns_ = {
        env: {bench_name: sn.avg(sn.extractall(
                  r'Success.*%s.*runtime=(?P<rt>[0-9.]+)' % bench_name,
                  outfile, 'rt', float))
              for bench_name in self.benchmarks[env]}
        for env in self.valid_prog_environs
    }
    self.maintainers = ['SK']
    self.tags = {'diagnostic', 'external-resources'}
def __init__(self, scale):
    super().__init__()
    self.descr = 'Quantum Espresso CPU check'
    self.maintainers = ['AK', 'LM']
    self.tags = {'scs', 'production', 'external-resources'}
    self.sourcesdir = os.path.join(self.current_system.resourcesdir,
                                   'Espresso')
    self.valid_systems = ['daint:mc']
    self.valid_prog_environs = ['PrgEnv-intel']
    self.modules = ['QuantumESPRESSO']
    self.executable = 'pw.x'
    self.executable_opts = ['-in', 'ausurf.in']
    if scale == 'small':
        self.valid_systems += ['dom:mc']
        self.num_tasks = 216
        self.num_tasks_per_node = 36
        self.reference = {
            'dom:mc': {
                'time': (159.0, None, 0.05, 's')
            },
            'daint:mc': {
                'time': (151.6, None, 0.05, 's')
            },
        }
    else:
        self.num_tasks = 576
        self.num_tasks_per_node = 36
        self.reference = {
            'daint:mc': {
                'time': (157.0, None, 0.40, 's')
            },
        }

    self.use_multithreading = True
    self.extra_resources = {
        'switches': {
            'num_switches': 1
        }
    }
    self.strict_check = False
    energy = sn.extractsingle(r'!\s+total energy\s+=\s+(?P<energy>\S+) Ry',
                              self.stdout, 'energy', float)
    self.sanity_patterns = sn.all([
        sn.assert_found(r'convergence has been achieved', self.stdout),
        sn.assert_reference(energy, -11427.09017162, -1e-10, 1e-10)
    ])
    self.perf_patterns = {
        'time': sn.extractsingle(r'electrons :\s+(?P<sec>\S+)s CPU ',
                                 self.stdout, 'sec', float)
    }
def __init__(self, **kwargs):
    super().__init__('collapseEdges',
                     'OpenFOAM check of collapseEdges: flange tutorial',
                     **kwargs)
    self.executable_opts = ['-latestTime', '-collapseFaces', '-parallel']
    self.num_tasks = 8
    self.num_tasks_per_node = 8
    self.sanity_patterns = sn.all([
        sn.assert_found('Finalising parallel run', self.stdout),
        sn.assert_found(r'^\s*[Ee]nd', self.stdout)
    ])
def __init__(self, **kwargs):
    super().__init__(
        'snappyHexMesh',
        'OpenFOAM check of snappyHexMesh: motorbike tutorial',
        **kwargs)
    self.executable_opts = ['-overwrite', '-parallel']
    self.num_tasks = 6
    self.num_tasks_per_node = 6
    self.sanity_patterns = sn.all([
        sn.assert_found('Finalising parallel run', self.stdout),
        sn.assert_found(r'^\s*[Ee]nd', self.stdout)
    ])
def validate_energy(self):
    energy = sn.avg(sn.extractall(
        r'ENERGY:([ \t]+\S+){10}[ \t]+(?P<energy>\S+)',
        self.stdout, 'energy', float)
    )
    energy_reference = -2451359.5
    energy_diff = sn.abs(energy - energy_reference)
    return sn.all([
        sn.assert_eq(sn.count(sn.extractall(
            r'TIMING: (?P<step_num>\S+) CPU:',
            self.stdout, 'step_num')), 50),
        sn.assert_lt(energy_diff, 2720)
    ])
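# --- Standalone sketch of the averaged-energy tolerance check above;
# 'namd.log' and its two ENERGY lines are hypothetical NAMD-style output
# (the regex skips ten fields, then captures the total-energy column).
import reframe.utility.sanity as sn

with open('namd.log', 'w') as fp:
    fp.write('ENERGY: 1 2 3 4 5 6 7 8 9 10 -2451360.0\n')
    fp.write('ENERGY: 1 2 3 4 5 6 7 8 9 10 -2451359.0\n')

energy = sn.avg(sn.extractall(
    r'ENERGY:([ \t]+\S+){10}[ \t]+(?P<energy>\S+)',
    'namd.log', 'energy', float))
energy_diff = sn.abs(energy - (-2451359.5))
print(sn.evaluate(sn.assert_lt(energy_diff, 2720)))  # True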