def __init__(self, num_ranks, test_folder):
    """SCF regression check: run sirius.scf and compare the JSON output
    against the stored reference.

    :param num_ranks: total number of MPI ranks to launch
    :param test_folder: verification sub-folder with input/reference data
    """
    super().__init__()
    self.descr = 'SCF check'
    self.valid_systems = ['osx', 'daint']
    self.valid_prog_environs = ['PrgEnv-gnu', 'PrgEnv-intel']
    self.num_tasks = num_ranks
    if self.current_system.name == 'daint':
        self.num_tasks_per_node = 1
        self.num_cpus_per_task = 12
        self.variables = {
            'OMP_NUM_THREADS': str(self.num_cpus_per_task),
            'MKL_NUM_THREADS': str(self.num_cpus_per_task)
        }

    self.executable = 'sirius.scf'
    self.sourcesdir = '../../verification/' + test_folder

    data_ref = load_json('output_ref.json')
    fout = 'output.json'
    self.sanity_patterns = sn.all([
        sn.assert_found(r'converged after', self.stdout,
                        msg="Calculation didn't converge"),
        sn.assert_lt(energy_diff(fout, data_ref), 1e-5,
                     msg="Total energy is different"),
        sn.assert_lt(stress_diff(fout, data_ref), 1e-5,
                     msg="Stress tensor is different"),
        sn.assert_lt(forces_diff(fout, data_ref), 1e-5,
                     msg="Atomic forces are different")
    ])
    # Reuse fout so the output option and the sanity checks cannot diverge.
    self.executable_opts = ['--output=' + fout]
def setup(self, partition, environ, **job_opts):
    """Build the sanity/performance patterns, then defer to the base setup.

    Each "Hello World" line is validated: reported thread ids must be below
    both the reported and the configured thread counts, and rank ids below
    both the reported and the configured task counts.
    """
    result = sn.findall(r'Hello World from thread \s*(\d+) out '
                        r'of \s*(\d+) from process \s*(\d+) out of '
                        r'\s*(\d+)', self.stdout)
    self.sanity_patterns = sn.all(
        sn.chain(
            [sn.assert_eq(sn.count(result),
                          self.num_tasks * self.num_cpus_per_task)],
            sn.map(lambda x: sn.assert_lt(int(x.group(1)), int(x.group(2))),
                   result),
            sn.map(lambda x: sn.assert_lt(int(x.group(3)), int(x.group(4))),
                   result),
            sn.map(lambda x: sn.assert_lt(int(x.group(1)),
                                          self.num_cpus_per_task), result),
            sn.map(lambda x: sn.assert_eq(int(x.group(2)),
                                          self.num_cpus_per_task), result),
            sn.map(lambda x: sn.assert_lt(int(x.group(3)), self.num_tasks),
                   result),
            sn.map(lambda x: sn.assert_eq(int(x.group(4)), self.num_tasks),
                   result),
        ))
    self.perf_patterns = {
        'compilation_time': sn.getattr(self, 'compilation_time_seconds')
    }
    # Carry the 's' unit, consistent with the sibling hello-world check's
    # reference tuple.
    self.reference = {'*': {'compilation_time': (60, None, 0.1, 's')}}
    super().setup(partition, environ, **job_opts)
def __init__(self, variant, lang, linkage):
    """Hello-world compile/run check for the ubelix system.

    :param variant: test variant label (not read here; presumably consumed
        by a subclass or parameterization — TODO confirm)
    :param lang: source-language key: 'c', 'cpp' or 'f90'
    :param linkage: value exported as CRAYPE_LINK_TYPE
    """
    self.linkage = linkage
    self.variables = {'CRAYPE_LINK_TYPE': linkage}
    self.prgenv_flags = {}
    self.lang_names = {'c': 'C', 'cpp': 'C++', 'f90': 'Fortran 90'}
    self.descr = self.lang_names[lang] + ' Hello World'
    self.sourcepath = 'hello_world'
    self.build_system = 'SingleSource'
    self.valid_systems = ['ubelix:compute', 'ubelix:gpu']
    self.valid_prog_environs = ['foss', 'intel']
    # Filled in later (performance metric below reads it via sn.getattr).
    self.compilation_time_seconds = None
    result = sn.findall(
        r'Hello World from thread \s*(\d+) out '
        r'of \s*(\d+) from process \s*(\d+) out of '
        r'\s*(\d+)', self.stdout)
    num_tasks = sn.getattr(self, 'num_tasks')
    num_cpus_per_task = sn.getattr(self, 'num_cpus_per_task')

    # Small readers that pull the captured groups out of a regex match.
    def tid(match):
        return int(match.group(1))

    def num_threads(match):
        return int(match.group(2))

    def rank(match):
        return int(match.group(3))

    def num_ranks(match):
        return int(match.group(4))

    # One output line per thread; every id must be inside both the
    # reported and the configured thread/task counts.
    self.sanity_patterns = sn.all(
        sn.chain(
            [
                sn.assert_eq(sn.count(result),
                             num_tasks * num_cpus_per_task)
            ],
            sn.map(lambda x: sn.assert_lt(tid(x), num_threads(x)), result),
            sn.map(lambda x: sn.assert_lt(rank(x), num_ranks(x)), result),
            sn.map(lambda x: sn.assert_lt(tid(x), num_cpus_per_task),
                   result),
            sn.map(
                lambda x: sn.assert_eq(num_threads(x), num_cpus_per_task),
                result),
            sn.map(lambda x: sn.assert_lt(rank(x), num_tasks), result),
            sn.map(lambda x: sn.assert_eq(num_ranks(x), num_tasks),
                   result),
        ))
    self.perf_patterns = {
        'compilation_time': sn.getattr(self, 'compilation_time_seconds')
    }
    self.reference = {'*': {'compilation_time': (60, None, 0.1, 's')}}
    self.maintainers = ['VH', 'EK']
    self.tags = {'production', 'prgenv'}
def __init__(self, num_ranks_k, num_ranks_d, test_folder, variant,
             energy_tol=1e-6, pressure_tol=1e-1, stress_tol=1e-4,
             forces_tol=1e-4):
    """Quantum ESPRESSO pw.x SCF check, optionally via the SIRIUS backend.

    :param num_ranks_k: number of k-point pools (pw.x -npool)
    :param num_ranks_d: number of diagonalization ranks (pw.x -ndiag)
    :param test_folder: input folder, relative to the parent directory
    :param variant: 'sirius' routes the SCF through the SIRIUS library
    :param energy_tol: tolerance on the total-energy difference
    :param pressure_tol: tolerance on the pressure difference
    :param stress_tol: tolerance on the stress-tensor difference
    :param forces_tol: tolerance on the atomic-forces difference
    """
    super().__init__()
    self.descr = 'SCF check'
    self.valid_systems = ['osx', 'daint']
    self.valid_prog_environs = ['PrgEnv-gnu', 'PrgEnv-intel']
    self.num_tasks = num_ranks_k * num_ranks_d
    if self.current_system.name == 'daint':
        self.num_tasks_per_node = 1
        self.num_cpus_per_task = 12
        self.variables = {
            'OMP_NUM_THREADS': str(self.num_cpus_per_task),
            'MKL_NUM_THREADS': str(self.num_cpus_per_task)
        }

    self.executable = 'pw.x'
    self.sourcesdir = '../' + test_folder
    self.executable_opts = [
        "-i pw.in", "-npool %i" % num_ranks_k, "-ndiag %i" % num_ranks_d
    ]
    if variant == 'sirius':
        self.executable_opts.append('-sirius_scf')

    # Compare the run's stdout against the stored reference in out.txt.
    patterns = [
        sn.assert_found(r'convergence has been achieved', self.stdout),
        sn.assert_lt(energy_diff(self.stdout, 'out.txt'), energy_tol,
                     msg="Total energy is different"),
        sn.assert_lt(pressure_diff(self.stdout, 'out.txt'), pressure_tol,
                     msg="Pressure is different"),
        sn.assert_lt(stress_diff(self.stdout, 'out.txt'), stress_tol,
                     msg="Stress tensor is different"),
        sn.assert_lt(forces_diff(self.stdout, 'out.txt'), forces_tol,
                     msg="Atomic forces are different")
    ]
    # The SIRIUS banner must appear when the SIRIUS backend is requested.
    if variant == 'sirius':
        patterns.append(sn.assert_found(r'SIRIUS.+git\shash', self.stdout))

    self.sanity_patterns = sn.all(patterns)
def set_sanity(self):
    """Validate the HPX hello-world output: exactly one line per OS-thread,
    with thread and locality ids inside their expected ranges.
    """
    greetings = sn.findall(r'hello world from OS-thread \s*(?P<tid>\d+) on '
                           r'locality (?P<lid>\d+)', self.stdout)
    # An HPX "locality" is roughly a process; here one per node.
    localities = self.num_tasks // self.num_tasks_per_node
    expected_lines = self.num_tasks * self.num_cpus_per_task
    count_check = [sn.assert_eq(sn.count(greetings), expected_lines)]
    thread_checks = sn.map(
        lambda m: sn.assert_lt(int(m.group('tid')),
                               self.num_cpus_per_task), greetings)
    locality_checks = sn.map(
        lambda m: sn.assert_lt(int(m.group('lid')), localities), greetings)
    self.sanity_patterns = sn.all(
        sn.chain(count_check, thread_checks, locality_checks))
def __init__(self):
    """LAMMPS production check: validate the final energy and extract the
    timesteps/s throughput.
    """
    self.valid_prog_environs = ['PrgEnv-gnu']
    self.modules = ['LAMMPS']
    # Reset sources dir relative to the SCS apps prefix
    self.sourcesdir = os.path.join(self.current_system.resourcesdir,
                                   'LAMMPS')
    ref_energy = -4.6195
    measured = sn.extractsingle(
        r'\s+500000(\s+\S+){3}\s+(?P<energy>\S+)\s+\S+\s\n',
        self.stdout, 'energy', float)
    self.perf_patterns = {
        'perf': sn.extractsingle(r'\s+(?P<perf>\S+) timesteps/s',
                                 self.stdout, 'perf', float),
    }
    deviation = sn.abs(measured - ref_energy)
    self.sanity_patterns = sn.all([
        sn.assert_found(r'Total wall time:', self.stdout),
        sn.assert_lt(deviation, 6e-4)
    ])
    self.strict_check = False
    self.extra_resources = {'switches': {'num_switches': 1}}
    self.tags = {'scs', 'external-resources'}
    self.maintainers = ['TR', 'VH']
def __init__(self, linkage):
    """Compile/run check for the PETSc 2D Poisson example (cray-petsc).

    :param linkage: 'static' or 'dynamic' CRAYPE_LINK_TYPE
    """
    self.descr = ('Compile/run PETSc 2D Poisson example with cray-petsc '
                  '(%s linking)') % linkage
    self.valid_systems = ['daint:gpu', 'daint:mc', 'dom:gpu', 'dom:mc']
    self.valid_prog_environs = ['PrgEnv-cray', 'PrgEnv-gnu',
                                'PrgEnv-intel']
    self.sourcepath = 'poisson2d.c'
    self.modules = ['cray-petsc']
    self.num_tasks = 16
    self.num_tasks_per_node = 8
    self.build_system = 'SingleSource'
    # FIXME: static compilation yields a link error with PrgEnv-intel
    # (Cray Bug #255701); as a workaround, use the C++ compiler driver.
    if linkage == 'static':
        self.build_system.cc = 'CC'

    self.variables = {'CRAYPE_LINK_TYPE': linkage}
    self.executable_opts = ['-da_grid_x 4', '-da_grid_y 4', '-ksp_monitor']
    # Check the final residual norm for convergence
    final_norm = sn.extractsingle(
        r'\s+\d+\s+KSP Residual norm\s+(?P<norm>\S+)',
        self.stdout, 'norm', float, -1)
    self.sanity_patterns = sn.assert_lt(final_norm, 1.0e-5)
    self.tags = {'production', 'craype'}
    self.maintainers = ['AJ', 'CB']
def scalapack_sanity(number1, number2, expected_value):
    """Check one matrix element Z(number2, number1) against its expected
    value; returns a deferred assertion that the absolute deviation is
    below 1e-15.
    """
    # f-strings instead of .format(), for consistency with the sibling
    # scalapack sanity helper elsewhere in this file.
    symbol = f'z{number1}{number2}'
    pattern = rf'Z\( {number2}, {number1}\)=\s+(?P<{symbol}>\S+)'
    found_value = sn.extractsingle(pattern, self.stdout, symbol,
                                   fortran_float)
    return sn.assert_lt(sn.abs(expected_value - found_value), 1.0e-15)
def __init__(self, variant):
    """Compile/run check for the PETSc 2D Poisson example (cray-petsc).

    :param variant: linking variant ('dynamic' adds the -dynamic cflag)
    """
    super().__init__()
    self.descr = ('Compile/run PETSc 2D Poisson example with cray-petsc '
                  '(%s linking)') % variant
    self.valid_systems = ['daint:gpu', 'daint:mc', 'dom:gpu', 'dom:mc']
    self.valid_prog_environs = ['PrgEnv-cray', 'PrgEnv-gnu',
                                'PrgEnv-intel']
    self.sourcepath = 'poisson2d.c'
    self.modules = ['cray-petsc']
    self.num_tasks = 16
    self.num_tasks_per_node = 8
    self.build_system = 'SingleSource'
    if variant == 'dynamic':
        self.build_system.cflags = ['-dynamic']

    self.executable_opts = ['-da_grid_x 4', '-da_grid_y 4', '-ksp_monitor']
    # Check the final residual norm for convergence
    final_norm = sn.extractsingle(
        r'\s+\d+\s+KSP Residual norm\s+(?P<norm>\S+)',
        self.stdout, 'norm', float, -1)
    self.sanity_patterns = sn.assert_lt(final_norm, 1.0e-5)
    self.tags = {'production'}
    self.maintainers = ['WS', 'AJ', 'TM']
def scalapack_sanity(number1, number2, expected_value):
    """Deferred check of matrix element Z(number2, number1): the extracted
    value must match expected_value to within 1e-15.
    """
    tag = f'z{number1}{number2}'
    regex = rf'Z\( {number2}, {number1}\)=\s+(?P<{tag}>\S+)'
    actual = sn.extractsingle(regex, self.stdout, tag, fortran_float)
    return sn.assert_lt(sn.abs(expected_value - actual), 1.0e-15)
def __init__(self):
    """RRTMGP OpenACC solver verification via the bundled test driver."""
    super().__init__()
    self.valid_systems = ['dom:gpu', 'daint:gpu']
    self.valid_prog_environs = ['PrgEnv-pgi']
    self.sourcesdir = os.path.join(self.current_system.resourcesdir,
                                   'RRTMGP')
    self.tags = {'external-resources'}
    self.prebuild_cmd = ['cp build/Makefile.conf.dom build/Makefile.conf']
    self.executable = 'python'
    self.executable_opts = [
        'util/scripts/run_tests.py', '--verbose', '--rel_diff_cut 1e-13',
        '--root ..', '--test ${INIFILE}_ncol-${NCOL}.ini'
    ]
    self.pre_run = [
        'pwd', 'module load netcdf-python/1.4.1-CrayGNU-19.06-python2',
        'cd test'
    ]
    self.modules = ['craype-accel-nvidia60', 'cray-netcdf']
    self.variables = {'NCOL': '500', 'INIFILE': 'openacc-solvers-lw'}
    diffs = sn.extractall(r'.*\[\S+, (\S+)\]', self.stdout, 1, float)
    # At least one value must be reported and all must stay below 1e-5.
    checks = sn.chain(
        [sn.assert_gt(sn.count(diffs), 0, msg='regex not matched')],
        sn.map(lambda d: sn.assert_lt(d, 1e-5), diffs))
    self.sanity_patterns = sn.all(checks)
    self.maintainers = ['WS', 'VK']
def __init__(self, input_file, output_file):
    """Single-GPU Amber run: check the total energy in the output file and
    extract the ns/day throughput.

    :param input_file: Amber input passed via -i
    :param output_file: Amber output passed via -o (kept after the run)
    """
    self.sourcesdir = os.path.join(self.current_system.resourcesdir,
                                   'Amber')
    self.valid_prog_environs = ['builtin']
    self.modules = ['Amber']
    self.num_tasks = 1
    self.num_tasks_per_node = 1
    self.num_gpus_per_node = 1
    self.executable_opts = ['-O', '-i', input_file, '-o', output_file]
    self.keep_files = [output_file]
    self.extra_resources = {'switches': {'num_switches': 1}}
    reference = -443246.8
    # The second-to-last Etot line carries the relevant summary value.
    measured = sn.extractsingle(r' Etot\s+=\s+(?P<energy>\S+)', output_file,
                                'energy', float, item=-2)
    deviation = sn.abs(measured - reference)
    self.sanity_patterns = sn.all([
        sn.assert_found(r'Final Performance Info:', output_file),
        sn.assert_lt(deviation, 14.9)
    ])
    self.perf_patterns = {
        'perf': sn.extractsingle(r'ns/day =\s+(?P<perf>\S+)', output_file,
                                 'perf', float, item=1)
    }
    self.maintainers = ['SO', 'VH']
    self.tags = {'scs', 'external-resources'}
def __init__(self):
    """CP2K H2O-256 benchmark: checks the step count, the final QS energy
    and extracts the total run time.
    """
    self.valid_prog_environs = ['builtin']
    self.executable = 'cp2k.psmp'
    self.executable_opts = ['H2O-256.inp']
    reference = -4404.2323
    measured = sn.extractsingle(
        r'\s+ENERGY\| Total FORCE_EVAL \( QS \) '
        r'energy \(a\.u\.\):\s+(?P<energy>\S+)',
        self.stdout, 'energy', float, item=-1)
    deviation = sn.abs(measured - reference)
    # Exactly ten MD steps must be reported in the output.
    step_count = sn.count(sn.extractall(r'(?P<step_count>STEP NUM)',
                                        self.stdout, 'step_count'))
    self.sanity_patterns = sn.all([
        sn.assert_found(r'PROGRAM STOPPED IN', self.stdout),
        sn.assert_eq(step_count, 10),
        sn.assert_lt(deviation, 1e-4)
    ])
    self.perf_patterns = {
        'time': sn.extractsingle(r'^ CP2K(\s+[\d\.]+){4}\s+(?P<perf>\S+)',
                                 self.stdout, 'perf', float)
    }
    self.maintainers = ['LM']
    self.tags = {'scs'}
    self.strict_check = False
    self.modules = ['CP2K']
    self.extra_resources = {'switches': {'num_switches': 1}}
def assert_energy_diff(self):
    """Deferred check: classical energy within 0.26 of the 25.81 reference."""
    reference = 25.81
    measured = sn.extractsingle(r'CLASSICAL ENERGY\s+-(?P<result>\S+)',
                                self.stdout, 'result', float)
    return sn.assert_lt(sn.abs(measured - reference), 0.26)
def __init__(self, scale, variant):
    """QuantumESPRESSO GPU check.

    :param scale: 'small' (6 tasks, also on dom) or anything else (16 tasks)
    :param variant: 'maint' tags the maintenance pipeline, otherwise
        production; the timing references are identical for both
    """
    super().__init__()
    self.descr = f'QuantumESPRESSO GPU check (version: {scale}, {variant})'
    self.valid_systems = ['daint:gpu']
    self.modules = ['QuantumESPRESSO/6.5a1-CrayPGI-19.10-cuda-10.1']
    self.num_gpus_per_node = 1
    if scale == 'small':
        self.valid_systems += ['dom:gpu']
        self.num_tasks = 6
        energy_reference = -11427.09017176
    else:
        self.num_tasks = 16
        energy_reference = -11427.09017179

    self.num_tasks_per_node = 1
    self.num_cpus_per_task = 12
    energy = sn.extractsingle(r'!\s+total energy\s+=\s+(?P<energy>\S+) Ry',
                              self.stdout, 'energy', float)
    energy_diff = sn.abs(energy - energy_reference)
    # Extend the inherited sanity check with the energy comparison.
    self.sanity_patterns = sn.all(
        [self.sanity_patterns, sn.assert_lt(energy_diff, 1e-8)])
    # The 'maint' and 'prod' variants previously carried two identical
    # copies of this table; keep a single copy keyed by scale only.
    references = {
        'small': {
            'dom:gpu': {'time': (60.0, None, 0.05, 's')},
            'daint:gpu': {'time': (60.0, None, 0.10, 's')}
        },
        'large': {
            'daint:gpu': {'time': (60.0, None, 0.10, 's')}
        }
    }
    self.reference = references[scale]
    self.tags |= {'maintenance' if variant == 'maint' else 'production'}
def assert_simulation_success(self):
    """Deferred check: SCF converged and total energy within tolerance."""
    total_energy = sn.extractsingle(
        r'!\s+total energy\s+=\s+(?P<energy>\S+) Ry',
        self.stdout, 'energy', float)
    deviation = sn.abs(total_energy - self.energy_reference)
    checks = [
        sn.assert_found(r'convergence has been achieved', self.stdout),
        sn.assert_lt(deviation, self.energy_tolerance)
    ]
    return sn.all(checks)
def assert_energy_diff(self):
    """Deferred check: CPMD classical energy (from stdout.txt) within 0.26
    of the 25.81 reference.

    NOTE: the run deliberately stays single-threaded — the OpenMP build of
    CPMD segfaults, so no OMP_NUM_THREADS override is applied.
    """
    measured = sn.extractsingle(r'CLASSICAL ENERGY\s+-(?P<result>\S+)',
                                'stdout.txt', 'result', float)
    reference = 25.81
    return sn.assert_lt(sn.abs(measured - reference), 0.26)
def __init__(self, lang, extension):
    """DDT offline-debugging check for the CUDA Jacobi sample.

    Runs the debugger offline with a breakpoint, an evaluation and a
    tracepoint inside the CUDA kernel, then validates ddtreport.txt.

    :param lang: language key, forwarded to the base class
    :param extension: source extension, forwarded to the base class
    """
    super().__init__(lang, extension)
    self.valid_systems = [
        'daint:gpu', 'dom:gpu', 'kesch:cn', 'tiger:gpu', 'arolla:cn',
        'tsa:cn'
    ]
    self.num_gpus_per_node = 1
    self.num_tasks_per_node = 1
    # Per-system modules providing CUDA support for the debug run.
    self.system_modules = {
        'arolla': ['cuda/10.1.243'],
        'daint': ['craype-accel-nvidia60'],
        'dom': ['craype-accel-nvidia60'],
        'kesch': ['cudatoolkit/8.0.61'],
        'tiger': ['craype-accel-nvidia60'],
        'tsa': ['cuda/10.1.243']
    }
    sysname = self.current_system.name
    self.modules += self.system_modules.get(sysname, [])
    # as long as cuda/9 will not be the default, we will need:
    if sysname in {'daint', 'kesch'}:
        self.variables = {'ALLINEA_FORCE_CUDA_VERSION': '8.0'}
    elif sysname in {'arolla', 'tsa'}:
        self.variables = {'ALLINEA_FORCE_CUDA_VERSION': '10.1'}

    # Offline session: break in the kernel, evaluate *residue_d and trace
    # the residue variable; everything lands in ddtreport.txt.
    self.ddt_options = [
        '--offline --output=ddtreport.txt ',
        '--break-at _jacobi-cuda-kernel.cu:59 --evaluate *residue_d ',
        '--trace-at _jacobi-cuda-kernel.cu:111,residue'
    ]
    self.build_system.cppflags = ['-DUSE_MPI', '-D_CSCS_ITMAX=5']
    # Pick the GPU arch and link flags appropriate for each system.
    if self.current_system.name == 'kesch':
        arch = 'sm_37'
        self.build_system.ldflags = ['-lm', '-lcudart']
    elif self.current_system.name in ['arolla', 'tsa']:
        arch = 'sm_70'
        self.build_system.ldflags = [
            '-lstdc++', '-lm', '-L$EBROOTCUDA/lib64', '-lcudart'
        ]
    else:
        arch = 'sm_60'
        self.build_system.ldflags = ['-lstdc++']

    self.build_system.options = ['NVCCFLAGS="-g -arch=%s"' % arch]
    # Report must show the breakpoint/evaluation/tracepoint output, a
    # residue value of ~0.25, and a clean termination of all processes.
    self.sanity_patterns = sn.all([
        sn.assert_found('MPI implementation', 'ddtreport.txt'),
        sn.assert_found('Evaluate', 'ddtreport.txt'),
        sn.assert_found(r'\*residue_d:', 'ddtreport.txt'),
        sn.assert_found(r'Debugging\s*:\s*srun\s+%s' % self.executable,
                        'ddtreport.txt'),
        sn.assert_lt(
            sn.abs(
                sn.extractsingle(
                    r'^tracepoint\s+.*\s+residue:\s+(?P<result>\S+)',
                    'ddtreport.txt', 'result', float) - 0.25), 1e-5),
        sn.assert_found(r'Every process in your program has terminated\.',
                        'ddtreport.txt')
    ])
def __init__(self):
    """OpenFOAM-Extend buoyantBoussinesqSimpleFoam (hotRoom test) check."""
    super().__init__()
    self.descr = ('OpenFOAM-Extend check buoyantBoussinesqSimpleFoam: '
                  'hotRoom test')
    self.executable = 'buoyantBoussinesqSimpleFoam'
    residuals = sn.extractall(r'\sglobal\s=\s(?P<res>\S+),', self.stdout,
                              'res', float)
    # Every global continuity error must be below 1e-17 in magnitude.
    self.sanity_patterns = sn.all(
        sn.map(lambda r: sn.assert_lt(abs(r), 1.e-17), residuals))
def __init__(self, arch, scale, variant):
    """NAMD production check.

    :param arch: architecture label (used in the description only)
    :param scale: 'small' or large task geometry
    :param variant: variant label (used in the description only)
    """
    self.descr = f'NAMD check ({arch}, {variant})'
    if self.current_system.name == 'pilatus':
        self.valid_prog_environs = ['cpeIntel']
    else:
        self.valid_prog_environs = ['builtin']

    self.modules = ['NAMD']
    # Reset sources dir relative to the SCS apps prefix
    self.sourcesdir = os.path.join(self.current_system.resourcesdir,
                                   'NAMD', 'prod')
    self.executable = 'namd2'
    self.use_multithreading = True
    self.num_tasks_per_core = 2
    if scale == 'small':
        # On Eiger a no-smp NAMD version is the default
        if self.current_system.name in ['eiger', 'pilatus']:
            self.num_tasks = 768
            self.num_tasks_per_node = 128
        else:
            self.num_tasks = 6
            self.num_tasks_per_node = 1
    else:
        if self.current_system.name in ['eiger', 'pilatus']:
            self.num_tasks = 2048
            self.num_tasks_per_node = 128
        else:
            self.num_tasks = 16
            self.num_tasks_per_node = 1

    # Average the last ENERGY column over all reported steps.
    energy = sn.avg(
        sn.extractall(r'ENERGY:([ \t]+\S+){10}[ \t]+(?P<energy>\S+)',
                      self.stdout, 'energy', float))
    energy_reference = -2451359.5
    energy_diff = sn.abs(energy - energy_reference)
    # Expect exactly 50 timing steps and an average energy near reference.
    self.sanity_patterns = sn.all([
        sn.assert_eq(
            sn.count(
                sn.extractall(r'TIMING: (?P<step_num>\S+) CPU:',
                              self.stdout, 'step_num')), 50),
        sn.assert_lt(energy_diff, 2720)
    ])
    # Performance metric: average days/ns over the benchmark lines.
    self.perf_patterns = {
        'days_ns': sn.avg(
            sn.extractall(
                r'Info: Benchmark time: \S+ CPUs \S+ '
                r's/step (?P<days_ns>\S+) days/ns \S+ MB memory',
                self.stdout, 'days_ns', float))
    }
    self.maintainers = ['CB', 'LM']
    self.tags = {'scs', 'external-resources'}
    self.extra_resources = {'switches': {'num_switches': 1}}
def assert_energy_diff(self):
    """Deferred check: wall-time line present and the 500000-step energy
    within 6e-4 of the -4.6195 reference.
    """
    reference = -4.6195
    measured = sn.extractsingle(
        r'\s+500000(\s+\S+){3}\s+(?P<energy>\S+)\s+\S+\s\n',
        self.stdout, 'energy', float)
    checks = [
        sn.assert_found(r'Total wall time:', self.stdout),
        sn.assert_lt(sn.abs(measured - reference), 6e-4)
    ]
    return sn.all(checks)
def __init__(self):
    """OpenFOAM buoyantBoussinesqSimpleFoam (hotroom tutorial) check."""
    super().__init__()
    self.descr = ('OpenFOAM check of buoyantBoussinesqSimpleFoam: '
                  'hotroom tutorial')
    self.executable = 'buoyantBoussinesqSimpleFoam'
    errors = sn.extractall(r'\sglobal\s=\s(?P<res>\S+),', self.stdout,
                           'res', float)
    # All global continuity errors small, and the run must reach 'End'.
    self.sanity_patterns = sn.all(
        sn.chain(sn.map(lambda e: sn.assert_lt(e, 1.e-17), errors),
                 [sn.assert_found(r'^\s*[Ee]nd', self.stdout)]))
def __init__(self):
    """OpenFOAM pimpleFoam (tjunction tutorial) check."""
    super().__init__()
    self.descr = 'OpenFOAM check of pimpleFoam: tjunction tutorial'
    eps_residuals = sn.extractall(
        r'Solving for epsilon, \w+\s\w+\s=\s\d.\d+.\s'
        r'Final residual\s=\s(?P<res>-?\S+),', self.stdout, 'res', float)
    # Final epsilon residuals must be small and the run must reach 'End'.
    self.sanity_patterns = sn.all(sn.chain(
        sn.map(lambda r: sn.assert_lt(r, 5.e-05), eps_residuals),
        [sn.assert_found(r'^\s*[Ee]nd', self.stdout)],
    ))
def __init__(self):
    """cuda-gdb check on the CUDA Jacobi sample.

    Drives cuda-gdb with a scripted session (.in.cudagdb): it must hit a
    breakpoint inside the kernel and print a near-zero result value.
    """
    self.valid_prog_environs = ['PrgEnv-gnu']
    self.valid_systems = [
        'daint:gpu', 'dom:gpu', 'kesch:cn', 'tiger:gpu', 'arolla:cn',
        'tsa:cn'
    ]
    self.num_gpus_per_node = 1
    self.num_tasks_per_node = 1
    self.sourcesdir = 'src/Cuda'
    self.executable = 'cuda-gdb'
    self.executable_opts = ['-x .in.cudagdb ./cuda_gdb_check']
    # unload xalt to avoid runtime error:
    self.pre_run = ['unset LD_PRELOAD']
    # System-specific CUDA modules and target GPU architecture.
    if self.current_system.name == 'kesch':
        self.exclusive_access = True
        self.modules = ['cudatoolkit/8.0.61']
        nvidia_sm = '37'
    elif self.current_system.name in ['arolla', 'tsa']:
        self.exclusive_access = True
        self.modules = ['cuda/10.1.243']
        nvidia_sm = '70'
    else:
        self.modules = ['craype-accel-nvidia60']
        nvidia_sm = '60'

    self.build_system = 'Make'
    self.build_system.makefile = 'Makefile_cuda_gdb'
    # Debug build (-g/-G) so cuda-gdb can place source-level breakpoints.
    self.build_system.cflags = [
        '-g', '-D_CSCS_ITMAX=1', '-DUSE_MPI', '-fopenmp'
    ]
    self.build_system.cxxflags = ['-g', '-G', '-arch=sm_%s' % nvidia_sm]
    self.build_system.ldflags = ['-g', '-fopenmp', '-lstdc++']
    if self.current_system.name == 'kesch':
        self.build_system.ldflags = [
            '-g', '-fopenmp', '-lcublas', '-lcudart', '-lm'
        ]
    elif self.current_system.name in ['arolla', 'tsa']:
        self.build_system.ldflags += [
            '-L$EBROOTCUDA/lib64', '-lcudart', '-lm'
        ]

    # The session must hit the breakpoint at kernel line 59, quit cleanly,
    # and the printed $1 value must be below 1e-5 in magnitude.
    self.sanity_patterns = sn.all([
        sn.assert_found(r'^Breakpoint 1 at .*: file ', self.stdout),
        sn.assert_found(r'_jacobi-cuda-kernel.cu, line 59\.', self.stdout),
        sn.assert_found(r'^\(cuda-gdb\) quit', self.stdout),
        sn.assert_lt(
            sn.abs(
                sn.extractsingle(r'\$1\s+=\s+(?P<result>\S+)', self.stdout,
                                 'result', float)), 1e-5)
    ])
    self.maintainers = ['MKr', 'JG']
    self.tags = {'production', 'craype'}
def __init__(self):
    """OpenFOAM-Extend simpleFoam (motorbike tutorial), parallel run."""
    super().__init__()
    self.descr = 'OpenFOAM-Extend check of simpleFoam: motorbike tutorial'
    self.executable_opts = ['-parallel']
    self.num_tasks = 6
    self.num_tasks_per_node = 6
    continuity_errors = sn.extractall(
        r'time step continuity errors : '
        r'\S+\s\S+ = \S+\sglobal = (?P<res>-?\S+),',
        self.stdout, 'res', float)
    # All global continuity errors must stay below 5e-4 in magnitude.
    self.sanity_patterns = sn.all(
        sn.map(lambda e: sn.assert_lt(abs(e), 5.e-04), continuity_errors))
def __init__(self):
    """Sanity-check a Spark job that estimates pi."""
    self.descr = 'Simple calculation of pi with Spark'
    self.valid_systems = ['daint:gpu', 'daint:mc']
    self.valid_prog_environs = ['PrgEnv-cray']
    self.modules = ['analytics']
    self.executable = 'start_analytics -t "spark-submit spark_pi.py"'
    pi_estimate = sn.extractsingle(r'Pi is roughly\s+(?P<pi>\S+)',
                                   self.stdout, 'pi', float)
    # Accept any estimate within 0.01 of math.pi.
    self.sanity_patterns = sn.assert_lt(sn.abs(pi_estimate - math.pi), 0.01)
    self.maintainers = ['TM', 'TR']
    self.tags = {'craype'}
def __init__(self):
    """OpenFOAM-Extend reconstructPar (multiRegionHeater test) check."""
    super().__init__()
    self.descr = ('OpenFOAM-Extend check of reconstructPar: '
                  'multiRegionHeater test')
    self.executable_opts = ['-parallel']
    self.num_tasks = 4
    self.num_tasks_per_node = 4
    # Only the last five global continuity errors are relevant here.
    tail = sn.extractall(r'\sglobal\s=\s(?P<res>-?\S+),', self.stdout,
                         'res', float)[-5:]
    self.sanity_patterns = sn.all(
        sn.map(lambda e: sn.assert_lt(abs(e), 1.e-04), tail))
def validate_energy(self):
    """Deferred check: exactly 50 timing steps and the mean ENERGY within
    2720 of the reference value.
    """
    reference = -2451359.5
    mean_energy = sn.avg(sn.extractall(
        r'ENERGY:([ \t]+\S+){10}[ \t]+(?P<energy>\S+)',
        self.stdout, 'energy', float))
    steps = sn.count(sn.extractall(r'TIMING: (?P<step_num>\S+) CPU:',
                                   self.stdout, 'step_num'))
    return sn.all([
        sn.assert_eq(steps, 50),
        sn.assert_lt(sn.abs(mean_energy - reference), 2720)
    ])
def setup(self, partition, environ, **job_opts):
    """Configure the HPX task geometry per partition and build the sanity
    patterns, then defer to the base setup.
    """
    hellos = sn.findall(
        r'hello world from OS-thread \s*(?P<tid>\d+) on '
        r'locality (?P<lid>\d+)', self.stdout)
    # Every supported partition runs 2 tasks, one per node; only the
    # per-task thread count differs (replaces the old four-way if/elif
    # that duplicated the num_tasks/num_tasks_per_node assignments).
    cpus_per_partition = {
        'daint:gpu': 12,
        'daint:mc': 36,
        'dom:gpu': 12,
        'dom:mc': 36,
    }
    if partition.fullname in cpus_per_partition:
        self.num_tasks = 2
        self.num_tasks_per_node = 1
        self.num_cpus_per_task = cpus_per_partition[partition.fullname]

    self.executable_opts = ['--hpx:threads=%s' % self.num_cpus_per_task]
    # https://stellar-group.github.io/hpx/docs/sphinx/branches/master/html/terminology.html#term-locality
    num_localities = self.num_tasks // self.num_tasks_per_node
    assert_num_tasks = sn.assert_eq(
        sn.count(hellos), self.num_tasks * self.num_cpus_per_task)
    assert_threads = sn.map(
        lambda x: sn.assert_lt(int(x.group('tid')),
                               self.num_cpus_per_task), hellos)
    assert_localities = sn.map(
        lambda x: sn.assert_lt(int(x.group('lid')), num_localities),
        hellos)
    self.sanity_patterns = sn.all(
        sn.chain([assert_num_tasks], assert_threads, assert_localities))
    super().setup(partition, environ, **job_opts)
def assert_hello_world(self):
    """Deferred sanity check for the hybrid MPI/OpenMP hello-world output:
    one line per thread, with thread and rank ids inside both the reported
    and the configured counts.
    """
    matches = sn.findall(r'Hello, World from thread \s*(\d+) out '
                         r'of \s*(\d+) from process \s*(\d+) out of '
                         r'\s*(\d+)', self.stdout)
    n_tasks = sn.getattr(self, 'num_tasks')
    n_cpus = sn.getattr(self, 'num_cpus_per_task')

    def grp(i):
        # Reader for the i-th captured group of a match, as an int.
        return lambda m: int(m.group(i))

    tid, n_threads, rank, n_ranks = grp(1), grp(2), grp(3), grp(4)
    return sn.all(
        sn.chain(
            [sn.assert_eq(sn.count(matches), n_tasks * n_cpus)],
            sn.map(lambda m: sn.assert_lt(tid(m), n_threads(m)), matches),
            sn.map(lambda m: sn.assert_lt(rank(m), n_ranks(m)), matches),
            sn.map(lambda m: sn.assert_lt(tid(m), n_cpus), matches),
            sn.map(lambda m: sn.assert_eq(n_threads(m), n_cpus), matches),
            sn.map(lambda m: sn.assert_lt(rank(m), n_tasks), matches),
            sn.map(lambda m: sn.assert_eq(n_ranks(m), n_tasks), matches),
        ))