def stress_diff(filename, data_ref):
    '''Return the accumulated absolute difference between the obtained
    and reference stress tensor components.

    Returns a deferred zero if either output lacks a ``stress`` entry.
    '''
    parsed_output = load_json(filename)
    have_stress = ('stress' in parsed_output['ground_state'] and
                   'stress' in data_ref['ground_state'])
    if not have_stress:
        return sn.abs(0)

    # Sum |obtained - reference| over all nine tensor components.
    return sn.sum(
        sn.abs(parsed_output['ground_state']['stress'][row][col] -
               data_ref['ground_state']['stress'][row][col])
        for row in range(3) for col in range(3)
    )
def forces_diff(filename, data_ref):
    '''Return the accumulated absolute difference between the obtained
    and reference atomic forces.

    Returns a deferred zero if either output lacks a ``forces`` entry.
    '''
    parsed_output = load_json(filename)
    have_forces = ('forces' in parsed_output['ground_state'] and
                   'forces' in data_ref['ground_state'])
    if not have_forces:
        return sn.abs(0)

    # num_atoms is a deferred expression; evaluate it so it can drive
    # the range() below.
    natoms = parsed_output['ground_state']['num_atoms'].evaluate()
    return sn.sum(
        sn.abs(parsed_output['ground_state']['forces'][atom][axis] -
               data_ref['ground_state']['forces'][atom][axis])
        for atom in range(natoms) for axis in range(3)
    )
def set_sanity_patterns(self):
    '''Build the sanity patterns: require the performance footer and a
    total energy within the benchmark-specific relative tolerance.'''
    # ener_ref maps benchmark -> (reference energy, relative tolerance).
    ref = self.ener_ref[self.benchmark]
    obtained = sn.extractsingle(r' Etot\s+=\s+(?P<energy>\S+)',
                                self.output_file, 'energy', float,
                                item=-2)
    # Allowed deviation is |reference * relative tolerance|.
    allowed = sn.abs(ref[0] * ref[1])
    self.sanity_patterns = sn.all([
        sn.assert_found(r'Final Performance Info:', self.output_file),
        sn.assert_lt(sn.abs(obtained - ref[0]), allowed)
    ])
def assert_energy_readout(self):
    '''Assert that the obtained energy meets the required tolerance.'''
    obtained = sn.extractsingle(r' Etot\s+=\s+(?P<energy>\S+)',
                                self.output_file, 'energy', float,
                                item=-2)
    deviation = sn.abs(obtained - self.energy_ref)
    # The tolerance is expressed relative to the reference energy.
    max_deviation = sn.abs(self.energy_ref * self.energy_tol)
    return sn.all([
        sn.assert_found(r'Final Performance Info:', self.output_file),
        sn.assert_lt(deviation, max_deviation)
    ])
def __init__(self):
    '''LAMMPS check: validate the total energy at step 500000 and
    measure the timesteps/s performance.'''
    self.valid_prog_environs = ['PrgEnv-gnu']
    self.modules = ['LAMMPS']
    # Reset sources dir relative to the SCS apps prefix
    self.sourcesdir = os.path.join(self.current_system.resourcesdir,
                                   'LAMMPS')
    ref_energy = -4.6195
    obtained_energy = sn.extractsingle(
        r'\s+500000(\s+\S+){3}\s+(?P<energy>\S+)\s+\S+\s\n',
        self.stdout, 'energy', float)
    self.sanity_patterns = sn.all([
        sn.assert_found(r'Total wall time:', self.stdout),
        sn.assert_lt(sn.abs(obtained_energy - ref_energy), 6e-4)
    ])
    self.perf_patterns = {
        'perf': sn.extractsingle(r'\s+(?P<perf>\S+) timesteps/s',
                                 self.stdout, 'perf', float),
    }
    self.strict_check = False
    self.extra_resources = {'switches': {'num_switches': 1}}
    self.tags = {'scs', 'external-resources'}
    self.maintainers = ['TR', 'VH']
def scalapack_sanity(number1, number2, expected_value):
    '''Check that matrix element Z(number2, number1) printed in the
    output matches ``expected_value`` to within 1e-15.'''
    tag = f'z{number1}{number2}'
    regex = rf'Z\( {number2}, {number1}\)=\s+(?P<{tag}>\S+)'
    value = sn.extractsingle(regex, self.stdout, tag, fortran_float)
    return sn.assert_lt(sn.abs(expected_value - value), 1.0e-15)
def assert_energy_diff(self):
    '''Check that the classical energy is within 0.26 of the 25.81
    reference value.'''
    reference = 25.81
    obtained = sn.extractsingle(r'CLASSICAL ENERGY\s+-(?P<result>\S+)',
                                self.stdout, 'result', float)
    return sn.assert_lt(sn.abs(obtained - reference), 0.26)
def __init__(self, input_file, output_file):
    '''Amber single-GPU check: validate the total energy and the ns/day
    performance parsed from ``output_file``.'''
    self.valid_prog_environs = ['builtin']
    self.modules = ['Amber']
    self.sourcesdir = os.path.join(self.current_system.resourcesdir,
                                   'Amber')
    self.num_tasks = 1
    self.num_tasks_per_node = 1
    self.num_gpus_per_node = 1
    self.executable_opts = ['-O', '-i', input_file, '-o', output_file]
    self.keep_files = [output_file]
    self.extra_resources = {'switches': {'num_switches': 1}}
    ref_energy = -443246.8
    obtained_energy = sn.extractsingle(r' Etot\s+=\s+(?P<energy>\S+)',
                                       output_file, 'energy', float,
                                       item=-2)
    self.sanity_patterns = sn.all([
        sn.assert_found(r'Final Performance Info:', output_file),
        sn.assert_lt(sn.abs(obtained_energy - ref_energy), 14.9)
    ])
    self.perf_patterns = {
        'perf': sn.extractsingle(r'ns/day =\s+(?P<perf>\S+)',
                                 output_file, 'perf', float, item=1)
    }
    self.maintainers = ['SO', 'VH']
    self.tags = {'scs', 'external-resources'}
def scalapack_sanity(number1, number2, expected_value):
    '''Check that matrix element Z(number2, number1) printed in the
    output matches ``expected_value`` to within 1e-15.

    :arg number1: index used as the second coordinate in the pattern.
    :arg number2: index used as the first coordinate in the pattern.
    :arg expected_value: reference value of the matrix element.
    '''
    # Use f-strings instead of str.format() for consistency with the
    # sibling implementation of this helper elsewhere in the file.
    symbol = f'z{number1}{number2}'
    pattern = rf'Z\( {number2}, {number1}\)=\s+(?P<{symbol}>\S+)'
    found_value = sn.extractsingle(pattern, self.stdout, symbol,
                                   fortran_float)
    return sn.assert_lt(sn.abs(expected_value - found_value), 1.0e-15)
def __init__(self):
    '''CP2K H2O-256 check: validate termination, step count and the
    final QS energy; extract the total runtime.'''
    self.valid_prog_environs = ['builtin']
    self.modules = ['CP2K']
    self.executable = 'cp2k.psmp'
    self.executable_opts = ['H2O-256.inp']
    ref_energy = -4404.2323
    obtained_energy = sn.extractsingle(
        r'\s+ENERGY\| Total FORCE_EVAL \( QS \) '
        r'energy \(a\.u\.\):\s+(?P<energy>\S+)',
        self.stdout, 'energy', float, item=-1)
    num_steps = sn.count(sn.extractall(r'(?P<step_count>STEP NUM)',
                                       self.stdout, 'step_count'))
    self.sanity_patterns = sn.all([
        sn.assert_found(r'PROGRAM STOPPED IN', self.stdout),
        sn.assert_eq(num_steps, 10),
        sn.assert_lt(sn.abs(obtained_energy - ref_energy), 1e-4)
    ])
    self.perf_patterns = {
        'time': sn.extractsingle(r'^ CP2K(\s+[\d\.]+){4}\s+(?P<perf>\S+)',
                                 self.stdout, 'perf', float)
    }
    self.strict_check = False
    self.extra_resources = {'switches': {'num_switches': 1}}
    self.maintainers = ['LM']
    self.tags = {'scs'}
def __init__(self, scale, variant):
    '''QuantumESPRESSO GPU check parameterized by job scale and by
    maintenance/production variant.'''
    super().__init__()
    self.descr = f'QuantumESPRESSO GPU check (version: {scale}, {variant})'
    self.valid_systems = ['daint:gpu']
    self.modules = ['QuantumESPRESSO/6.5a1-CrayPGI-19.10-cuda-10.1']
    self.num_gpus_per_node = 1
    if scale == 'small':
        self.valid_systems += ['dom:gpu']
        self.num_tasks = 6
        ref_energy = -11427.09017176
    else:
        self.num_tasks = 16
        ref_energy = -11427.09017179

    self.num_tasks_per_node = 1
    self.num_cpus_per_task = 12
    total_energy = sn.extractsingle(
        r'!\s+total energy\s+=\s+(?P<energy>\S+) Ry',
        self.stdout, 'energy', float)
    # Extend the inherited sanity patterns with the energy check.
    self.sanity_patterns = sn.all(
        [self.sanity_patterns,
         sn.assert_lt(sn.abs(total_energy - ref_energy), 1e-8)])
    # Timing references are identical for both variants; share one dict.
    timings = {
        'small': {
            'dom:gpu': {'time': (60.0, None, 0.05, 's')},
            'daint:gpu': {'time': (60.0, None, 0.10, 's')}
        },
        'large': {
            'daint:gpu': {'time': (60.0, None, 0.10, 's')}
        }
    }
    references = {'maint': timings, 'prod': timings}
    self.reference = references[variant][scale]
    if variant == 'maint':
        self.tags |= {'maintenance'}
    else:
        self.tags |= {'production'}
def stress_diff(ostream, ostream_ref):
    '''Return the maximum absolute difference between the obtained and
    reference stress tensor components.

    :arg ostream: output stream with the obtained results.
    :arg ostream_ref: output stream with the reference results.
    '''
    stress = get_stress(ostream)
    stress_ref = get_stress(ostream_ref)
    # FIX: the stress tensor is 3x3 but range(2) compared only the
    # upper-left 2x2 sub-block, silently ignoring the third row and
    # column (the dict-based sibling of this helper iterates all nine
    # components).
    return sn.max(
        sn.abs(stress_ref[i][j] - stress[i][j])
        for i in range(3) for j in range(3))
def assert_simulation_success(self):
    '''Check for SCF convergence and that the total energy is within
    the configured tolerance of the reference value.'''
    obtained = sn.extractsingle(
        r'!\s+total energy\s+=\s+(?P<energy>\S+) Ry',
        self.stdout, 'energy', float)
    deviation = sn.abs(obtained - self.energy_reference)
    return sn.all([
        sn.assert_found(r'convergence has been achieved', self.stdout),
        sn.assert_lt(deviation, self.energy_tolerance)
    ])
def assert_energy_diff(self):
    '''Check the classical energy in ``stdout.txt`` against the 25.81
    reference value (tolerance 0.26).'''
    # OpenMP version of CPMD segfaults
    # self.variables = { 'OMP_NUM_THREADS' : '8' }
    reference = 25.81
    obtained = sn.extractsingle(r'CLASSICAL ENERGY\s+-(?P<result>\S+)',
                                'stdout.txt', 'result', float)
    return sn.assert_lt(sn.abs(obtained - reference), 0.26)
def __init__(self, lang, extension):
    '''DDT offline-debugging check of the MPI+CUDA Jacobi example.

    Builds the example with debug info, runs it under DDT with a
    breakpoint and a tracepoint inside the CUDA kernel and sanity-checks
    the generated ``ddtreport.txt``.
    '''
    super().__init__(lang, extension)
    self.valid_systems = [
        'daint:gpu', 'dom:gpu', 'kesch:cn', 'tiger:gpu', 'arolla:cn',
        'tsa:cn'
    ]
    self.num_gpus_per_node = 1
    self.num_tasks_per_node = 1
    # Per-system modules providing the CUDA toolchain.
    self.system_modules = {
        'arolla': ['cuda/10.1.243'],
        'daint': ['craype-accel-nvidia60'],
        'dom': ['craype-accel-nvidia60'],
        'kesch': ['cudatoolkit/8.0.61'],
        'tiger': ['craype-accel-nvidia60'],
        'tsa': ['cuda/10.1.243']
    }
    sysname = self.current_system.name
    self.modules += self.system_modules.get(sysname, [])
    # as long as cuda/9 will not be the default, we will need:
    if sysname in {'daint', 'kesch'}:
        self.variables = {'ALLINEA_FORCE_CUDA_VERSION': '8.0'}
    elif sysname in {'arolla', 'tsa'}:
        self.variables = {'ALLINEA_FORCE_CUDA_VERSION': '10.1'}

    # Non-interactive DDT session: break inside the kernel, evaluate the
    # device residue pointer and trace the residue value at line 111.
    self.ddt_options = [
        '--offline --output=ddtreport.txt ',
        '--break-at _jacobi-cuda-kernel.cu:59 --evaluate *residue_d ',
        '--trace-at _jacobi-cuda-kernel.cu:111,residue'
    ]
    self.build_system.cppflags = ['-DUSE_MPI', '-D_CSCS_ITMAX=5']
    # Select the GPU architecture and the link flags per system.
    if self.current_system.name == 'kesch':
        arch = 'sm_37'
        self.build_system.ldflags = ['-lm', '-lcudart']
    elif self.current_system.name in ['arolla', 'tsa']:
        arch = 'sm_70'
        self.build_system.ldflags = [
            '-lstdc++', '-lm', '-L$EBROOTCUDA/lib64', '-lcudart'
        ]
    else:
        arch = 'sm_60'
        self.build_system.ldflags = ['-lstdc++']

    self.build_system.options = ['NVCCFLAGS="-g -arch=%s"' % arch]
    # The traced residue must settle at 0.25 (within 1e-5) and DDT must
    # report clean termination of every process.
    self.sanity_patterns = sn.all([
        sn.assert_found('MPI implementation', 'ddtreport.txt'),
        sn.assert_found('Evaluate', 'ddtreport.txt'),
        sn.assert_found(r'\*residue_d:', 'ddtreport.txt'),
        sn.assert_found(r'Debugging\s*:\s*srun\s+%s' % self.executable,
                        'ddtreport.txt'),
        sn.assert_lt(
            sn.abs(
                sn.extractsingle(
                    r'^tracepoint\s+.*\s+residue:\s+(?P<result>\S+)',
                    'ddtreport.txt', 'result', float) - 0.25), 1e-5),
        sn.assert_found(r'Every process in your program has terminated\.',
                        'ddtreport.txt')
    ])
def __init__(self, arch, scale, variant):
    '''NAMD production check parameterized by architecture, job scale
    and variant.'''
    self.descr = f'NAMD check ({arch}, {variant})'
    if self.current_system.name == 'pilatus':
        self.valid_prog_environs = ['cpeIntel']
    else:
        self.valid_prog_environs = ['builtin']

    self.modules = ['NAMD']
    # Reset sources dir relative to the SCS apps prefix
    self.sourcesdir = os.path.join(self.current_system.resourcesdir,
                                   'NAMD', 'prod')
    self.executable = 'namd2'
    self.use_multithreading = True
    self.num_tasks_per_core = 2
    on_cray_ex = self.current_system.name in ['eiger', 'pilatus']
    if scale == 'small':
        # On Eiger a no-smp NAMD version is the default
        if on_cray_ex:
            self.num_tasks = 768
            self.num_tasks_per_node = 128
        else:
            self.num_tasks = 6
            self.num_tasks_per_node = 1
    else:
        if on_cray_ex:
            self.num_tasks = 2048
            self.num_tasks_per_node = 128
        else:
            self.num_tasks = 16
            self.num_tasks_per_node = 1

    ref_energy = -2451359.5
    mean_energy = sn.avg(
        sn.extractall(r'ENERGY:([ \t]+\S+){10}[ \t]+(?P<energy>\S+)',
                      self.stdout, 'energy', float))
    num_steps = sn.count(
        sn.extractall(r'TIMING: (?P<step_num>\S+) CPU:',
                      self.stdout, 'step_num'))
    self.sanity_patterns = sn.all([
        sn.assert_eq(num_steps, 50),
        sn.assert_lt(sn.abs(mean_energy - ref_energy), 2720)
    ])
    self.perf_patterns = {
        'days_ns': sn.avg(
            sn.extractall(
                r'Info: Benchmark time: \S+ CPUs \S+ '
                r's/step (?P<days_ns>\S+) days/ns \S+ MB memory',
                self.stdout, 'days_ns', float))
    }
    self.maintainers = ['CB', 'LM']
    self.tags = {'scs', 'external-resources'}
    self.extra_resources = {'switches': {'num_switches': 1}}
def assert_energy_diff(self):
    '''Check for a normal LAMMPS termination and that the step-500000
    energy is within 6e-4 of the reference.'''
    reference = -4.6195
    obtained = sn.extractsingle(
        r'\s+500000(\s+\S+){3}\s+(?P<energy>\S+)\s+\S+\s\n',
        self.stdout, 'energy', float)
    return sn.all([
        sn.assert_found(r'Total wall time:', self.stdout),
        sn.assert_lt(sn.abs(obtained - reference), 6e-4)
    ])
def __init__(self):
    '''cuda-gdb check: build the MPI+CUDA Jacobi example with device
    debug info and drive cuda-gdb from the ``.in.cudagdb`` script,
    verifying the breakpoint placement and the evaluated result.
    '''
    self.valid_prog_environs = ['PrgEnv-gnu']
    self.valid_systems = [
        'daint:gpu', 'dom:gpu', 'kesch:cn', 'tiger:gpu', 'arolla:cn',
        'tsa:cn'
    ]
    self.num_gpus_per_node = 1
    self.num_tasks_per_node = 1
    self.sourcesdir = 'src/Cuda'
    self.executable = 'cuda-gdb'
    self.executable_opts = ['-x .in.cudagdb ./cuda_gdb_check']
    # unload xalt to avoid runtime error:
    self.pre_run = ['unset LD_PRELOAD']
    # Per-system CUDA modules and target compute capability.
    if self.current_system.name == 'kesch':
        self.exclusive_access = True
        self.modules = ['cudatoolkit/8.0.61']
        nvidia_sm = '37'
    elif self.current_system.name in ['arolla', 'tsa']:
        self.exclusive_access = True
        self.modules = ['cuda/10.1.243']
        nvidia_sm = '70'
    else:
        self.modules = ['craype-accel-nvidia60']
        nvidia_sm = '60'

    self.build_system = 'Make'
    self.build_system.makefile = 'Makefile_cuda_gdb'
    # '-g -G' keeps host and device debug information for cuda-gdb.
    self.build_system.cflags = [
        '-g', '-D_CSCS_ITMAX=1', '-DUSE_MPI', '-fopenmp'
    ]
    self.build_system.cxxflags = ['-g', '-G', '-arch=sm_%s' % nvidia_sm]
    self.build_system.ldflags = ['-g', '-fopenmp', '-lstdc++']
    if self.current_system.name == 'kesch':
        self.build_system.ldflags = [
            '-g', '-fopenmp', '-lcublas', '-lcudart', '-lm'
        ]
    elif self.current_system.name in ['arolla', 'tsa']:
        self.build_system.ldflags += [
            '-L$EBROOTCUDA/lib64', '-lcudart', '-lm'
        ]

    # The evaluated expression ($1) must be ~0 (|result| < 1e-5).
    self.sanity_patterns = sn.all([
        sn.assert_found(r'^Breakpoint 1 at .*: file ', self.stdout),
        sn.assert_found(r'_jacobi-cuda-kernel.cu, line 59\.',
                        self.stdout),
        sn.assert_found(r'^\(cuda-gdb\) quit', self.stdout),
        sn.assert_lt(
            sn.abs(
                sn.extractsingle(r'\$1\s+=\s+(?P<result>\S+)',
                                 self.stdout, 'result', float)), 1e-5)
    ])
    self.maintainers = ['MKr', 'JG']
    self.tags = {'production', 'craype'}
def __init__(self):
    '''Spark pi check driven through the analytics start script.'''
    self.descr = 'Simple calculation of pi with Spark'
    self.valid_systems = ['daint:gpu', 'daint:mc']
    self.valid_prog_environs = ['PrgEnv-cray']
    self.modules = ['analytics']
    self.executable = 'start_analytics -t "spark-submit spark_pi.py"'
    computed_pi = sn.extractsingle(r'Pi is roughly\s+(?P<pi>\S+)',
                                   self.stdout, 'pi', float)
    # The Monte-Carlo estimate only needs to be within 0.01 of pi.
    self.sanity_patterns = sn.assert_lt(sn.abs(computed_pi - math.pi),
                                        0.01)
    self.maintainers = ['TM', 'TR']
    self.tags = {'craype'}
def validate_energy(self):
    '''Check the NAMD step count and the mean energy against the
    reference value.'''
    ref_energy = -2451359.5
    mean_energy = sn.avg(sn.extractall(
        r'ENERGY:([ \t]+\S+){10}[ \t]+(?P<energy>\S+)',
        self.stdout, 'energy', float))
    num_steps = sn.count(sn.extractall(
        r'TIMING: (?P<step_num>\S+) CPU:', self.stdout, 'step_num'))
    return sn.all([
        sn.assert_eq(num_steps, 50),
        sn.assert_lt(sn.abs(mean_energy - ref_energy), 2720)
    ])
def __init__(self):
    '''Standalone Spark pi check using the start/stop-all scripts.'''
    self.descr = f'Simple calculation of pi with {self.variant}'
    self.valid_systems = ['daint:gpu', 'daint:mc', 'dom:gpu', 'dom:mc']
    self.valid_prog_environs = ['builtin']
    self.modules = ['Spark']
    self.prerun_cmds = ['start-all.sh']
    self.postrun_cmds = ['stop-all.sh']
    self.num_tasks = 3
    self.num_tasks_per_node = 1
    computed_pi = sn.extractsingle(r'Pi is roughly\s+(?P<pi>\S+)',
                                   self.stdout, 'pi', float)
    # The Monte-Carlo estimate only needs to be within 0.01 of pi.
    self.sanity_patterns = sn.assert_lt(sn.abs(computed_pi - math.pi),
                                        0.01)
    self.maintainers = ['TM', 'RS']
    self.tags = {'production'}
def __init__(self, version, variant):
    '''NAMD check parameterized by version and variant.

    Fixes: the performance-pattern regexes were plain strings containing
    regex escapes (a DeprecationWarning in Python 3, a future syntax
    error); they are now raw strings. %-formatting is modernized to
    f-strings (identical resulting strings).
    '''
    super().__init__()
    self.name = f'namd_{version}_{variant}_check'
    self.descr = f'NAMD check ({version}, {variant})'
    self.valid_prog_environs = ['PrgEnv-intel']
    self.modules = ['NAMD']
    # Reset sources dir relative to the SCS apps prefix
    self.sourcesdir = os.path.join(self.current_system.resourcesdir,
                                   'NAMD', 'prod')
    self.executable = 'namd2'
    self.use_multithreading = True
    self.num_tasks_per_core = 2
    if self.current_system.name == 'dom':
        self.num_tasks = 6
        self.num_tasks_per_node = 1
    else:
        self.num_tasks = 16
        self.num_tasks_per_node = 1

    energy = sn.avg(
        sn.extractall(r'ENERGY:(\s+\S+){10}\s+(?P<energy>\S+)',
                      self.stdout, 'energy', float))
    energy_reference = -2451359.5
    energy_diff = sn.abs(energy - energy_reference)
    self.sanity_patterns = sn.all([
        sn.assert_eq(
            sn.count(
                sn.extractall(r'TIMING: (?P<step_num>\S+) CPU:',
                              self.stdout, 'step_num')), 50),
        sn.assert_lt(energy_diff, 2720)
    ])
    self.perf_patterns = {
        # FIX: raw strings; the originals were plain strings with
        # invalid escape sequences.
        'days_ns': sn.avg(
            sn.extractall(
                r'Info: Benchmark time: \S+ CPUs \S+ '
                r's/step (?P<days_ns>\S+) days/ns \S+ MB memory',
                self.stdout, 'days_ns', float))
    }
    self.maintainers = ['CB', 'LM']
    self.tags = {'scs'}
    self.strict_check = False
    self.extra_resources = {'switches': {'num_switches': 1}}
def __init__(self):
    '''OpenFOAM simpleFoam motorbike tutorial run on six ranks.'''
    super().__init__()
    self.descr = 'OpenFOAM check of simpleFoam: motorbike tutorial'
    self.executable_opts = ['-parallel']
    self.num_tasks = 6
    self.num_tasks_per_node = 6
    continuity_error = sn.extractsingle(
        r'time step continuity errors : \S+\s\S+ = \S+\s'
        r'global = (?P<res>-?\S+),',
        self.stdout, 'res', float)
    self.sanity_patterns = sn.all([
        sn.assert_found('Finalising parallel run', self.stdout),
        sn.assert_found(r'^\s*[Ee]nd', self.stdout),
        # The global continuity error must stay below 1e-4.
        sn.assert_lt(sn.abs(continuity_error), 1.e-04)
    ])
def __init__(self):
    '''Spark pi check on a standalone two-node Spark cluster.'''
    self.descr = 'Simple calculation of pi with Spark'
    self.valid_systems = ['daint:gpu', 'daint:mc', 'dom:gpu', 'dom:mc']
    self.valid_prog_environs = ['PrgEnv-gnu']
    self.modules = ['Spark']
    self.sourcesdir = None
    self.pre_run = ['start-all.sh']
    self.post_run = ['stop-all.sh']
    self.num_tasks = 2
    self.num_tasks_per_node = 1
    computed_pi = sn.extractsingle(r'Pi is roughly\s+(?P<pi>\S+)',
                                   self.stdout, 'pi', float)
    # The Monte-Carlo estimate only needs to be within 0.01 of pi.
    self.sanity_patterns = sn.assert_lt(sn.abs(computed_pi - math.pi),
                                        0.01)
    self.maintainers = ['TM', 'TR']
    self.tags = {'production'}
def __init__(self, scale):
    '''CPMD check (C4H6 metadynamics), parameterized by job scale
    ('small' runs on 9 tasks and also on dom; anything else on 16).
    '''
    super().__init__()
    self.descr = 'CPMD check (C4H6 metadynamics)'
    self.maintainers = ['AJ', 'LM']
    self.tags = {'production'}
    self.valid_systems = ['daint:gpu']
    if scale == 'small':
        self.num_tasks = 9
        self.valid_systems += ['dom:gpu']
    else:
        self.num_tasks = 16

    # NOTE(review): the time limit is applied to both scales here —
    # confirm this matches the original intent of the collapsed source.
    self.time_limit = (0, 20, 0)
    self.num_tasks_per_node = 1
    self.valid_prog_environs = ['PrgEnv-intel']
    self.modules = ['CPMD']
    self.executable = 'cpmd.x'
    # Output is redirected to stdout.txt so the sanity/perf patterns
    # below can parse it.
    self.executable_opts = ['ana_c4h6.in > stdout.txt']
    self.readonly_files = ['ana_c4h6.in', 'C_MT_BLYP', 'H_MT_BLYP']
    self.use_multithreading = True
    self.strict_check = False
    self.extra_resources = {'switches': {'num_switches': 1}}
    # OpenMP version of CPMD segfaults
    # self.variables = { 'OMP_NUM_THREADS' : '8' }
    energy = sn.extractsingle(r'CLASSICAL ENERGY\s+-(?P<result>\S+)',
                              'stdout.txt', 'result', float)
    energy_reference = 25.81
    energy_diff = sn.abs(energy - energy_reference)
    self.sanity_patterns = sn.assert_lt(energy_diff, 0.26)
    self.perf_patterns = {
        'time': sn.extractsingle(r'^ cpmd(\s+[\d\.]+){3}\s+(?P<perf>\S+)',
                                 'stdout.txt', 'perf', float)
    }
    # Per-system runtime references; tolerances differ by scale.
    if scale == 'small':
        self.reference = {
            'daint:gpu': {
                'time': (285.5, None, 0.20, 's')
            },
            'dom:gpu': {
                'time': (332.0, None, 0.15, 's')
            }
        }
    else:
        self.reference = {'daint:gpu': {'time': (245.0, None, 0.59, 's')}}
def assert_energy_readout(self):
    '''Assert that the obtained energy meets the benchmark tolerances.

    The energy is extracted by a benchmark-specific member function
    named ``energy_<benchmark>()``; a missing extractor is reported as
    an explicit failure.
    '''
    energy_fn_name = f'energy_{util.toalphanum(self.__bench).lower()}'
    energy_fn = getattr(self, energy_fn_name, None)
    # Fail eagerly with a helpful message if no extractor is defined
    # for this benchmark.
    sn.assert_true(
        energy_fn is not None,
        msg=(f"cannot extract energy for benchmark {self.__bench!r}: "
             f"please define a member function '{energy_fn_name}()'")
    ).evaluate()
    energy = energy_fn()
    # FIX: removed the unused `energy_diff` local; the tolerance check
    # is performed via sn.assert_reference below.
    return sn.all([
        sn.assert_found('Finished mdrun', 'md.log'),
        sn.assert_reference(energy, self.energy_ref, -self.energy_tol,
                            self.energy_tol)
    ])
def assert_energy_diff(self):
    '''Check that CP2K terminated, performed 10 steps, and that the
    final QS energy is within 1e-4 of the reference.'''
    reference = -4404.2323
    obtained = sn.extractsingle(
        r'\s+ENERGY\| Total FORCE_EVAL \( QS \) '
        r'energy [\[\(]a\.u\.[\]\)]:\s+(?P<energy>\S+)',
        self.stdout, 'energy', float, item=-1)
    num_steps = sn.count(
        sn.extractall(r'(?i)(?P<step_count>STEP NUMBER)',
                      self.stdout, 'step_count'))
    return sn.all([
        sn.assert_found(r'PROGRAM STOPPED IN', self.stdout),
        sn.assert_eq(num_steps, 10),
        sn.assert_lt(sn.abs(obtained - reference), 1e-4)
    ])
def __init__(self):
    '''cuda-gdb check (interactive-output variant): build the CUDA
    Jacobi example with device debug info and verify the cuda-gdb
    breakpoint output and the evaluated result.
    '''
    super().__init__()
    self.valid_prog_environs = ['PrgEnv-gnu']
    self.valid_systems = ['daint:gpu', 'dom:gpu', 'kesch:cn']
    self.num_gpus_per_node = 1
    self.num_tasks_per_node = 1
    self.sourcesdir = 'src/Cuda'
    self.executable = 'cuda-gdb cuda_gdb_check'
    if self.current_system.name == 'kesch':
        self.exclusive_access = True
        self.modules = ['cudatoolkit/8.0.61']
    else:
        self.modules = ['craype-accel-nvidia60']

    self.build_system = 'Make'
    self.build_system.makefile = 'Makefile_cuda_gdb'
    self.build_system.cflags = [
        '-g', '-D_CSCS_ITMAX=1', '-DUSE_MPI', '-fopenmp'
    ]
    # Target compute capability per system.
    nvidia_sm = '37' if self.current_system.name == 'kesch' else '60'
    # '-g -G' keeps host and device debug information for cuda-gdb.
    self.build_system.cxxflags = ['-g', '-G', '-arch=sm_%s' % nvidia_sm]
    self.build_system.ldflags = ['-g', '-fopenmp', '-lstdc++']
    # FIXME: workaround until the kesch programming environment is fixed
    if self.current_system.name == 'kesch':
        self.build_system.ldflags = [
            '-g', '-fopenmp', '-lcublas', '-lcudart', '-lm'
        ]

    # The evaluated expression ($1) must be ~0 (|result| < 1e-5).
    self.sanity_patterns = sn.all([
        sn.assert_found(r'^\(cuda-gdb\) Breakpoint 1 at .*: file ',
                        self.stdout),
        sn.assert_found(r'_jacobi-cuda-kernel.cu, line 59\.',
                        self.stdout),
        sn.assert_found(r'^\(cuda-gdb\) Starting program:', self.stdout),
        sn.assert_found(r'^\(cuda-gdb\) quit', self.stdout),
        sn.assert_lt(
            sn.abs(
                sn.extractsingle(
                    r'^\(cuda-gdb\)\s+\$1\s+=\s+(?P<result>\S+)',
                    self.stdout, 'result', float)), 1e-5)
    ])
    self.maintainers = ['MK', 'JG']
    self.tags = {'production'}
def forces_diff(ostream, ostream_ref):
    '''Return the maximum absolute difference between the obtained and
    reference atomic forces.

    :arg ostream: output stream with the obtained results.
    :arg ostream_ref: output stream with the reference results.
    '''
    forces = get_forces(ostream)
    forces_ref = get_forces(ostream_ref)
    # Count atoms in both arrays by iterating; the deferred sequences
    # may not support len().
    na = sum(1 for _ in forces)
    na_ref = sum(1 for _ in forces_ref)
    sn.assert_eq(na, na_ref,
                 msg='Wrong length of forces array: {0} != {1}').evaluate()
    # FIX: each force vector has three Cartesian components; range(2)
    # silently skipped the z component of the comparison (the dict-based
    # sibling of this helper iterates all three).
    return sn.max(
        sn.abs(forces[i][j] - forces_ref[i][j])
        for i in range(na) for j in range(3))
def __init__(self, arch, flavor):
    '''NAMD check parameterized by architecture and flavor
    ('multicore' or 'verbs').

    Fixes: the two consecutive flavor branches assigned identical
    values and are merged into one condition; the performance-pattern
    regexes were plain strings containing regex escapes (a
    DeprecationWarning in Python 3) and are now raw strings.
    '''
    super().__init__()
    self.descr = 'NAMD check (%s)' % (arch)
    # FIX: both flavors selected the same environments; one merged
    # condition replaces the duplicated `if` statements.
    if flavor in ('multicore', 'verbs'):
        self.valid_prog_environs = ['intel-2016.4', 'intel-2018.3']

    self.modules = ['namd-%s' % flavor]
    # Reset sources dir relative to the SCS apps prefix
    self.sourcesdir = os.path.join(self.current_system.resourcesdir,
                                   'NAMD')
    self.executable = 'namd2'
    self.use_multithreading = True
    energy = sn.avg(
        sn.extractall(r'^ENERGY:(\s+\S+){10}\s+(?P<energy>\S+)',
                      self.stdout, 'energy', float))
    energy_reference = -2451359.5
    energy_diff = sn.abs(energy - energy_reference)
    self.sanity_patterns = sn.all([
        sn.assert_eq(
            sn.count(
                sn.extractall(r'TIMING: (?P<step_num>\S+) CPU:',
                              self.stdout, 'step_num')), 25),
        sn.assert_lt(energy_diff, 2720)
    ])
    self.perf_patterns = {
        # FIX: raw strings; the originals were plain strings with
        # invalid escape sequences.
        'days_ns': sn.avg(
            sn.extractall(
                r'Info: Benchmark time: \S+ CPUs \S+ '
                r's/step (?P<days_ns>\S+) days/ns \S+ MB memory',
                self.stdout, 'days_ns', float))
    }
    self.maintainers = ['CB', 'LM']
    self.tags = {'scs', 'external-resources'}
    self.strict_check = False
    self.extra_resources = {'switches': {'num_switches': 1}}