def setup(self, partition, environ, **job_opts):
    """Install sanity and performance patterns for the hello-world run.

    Sanity: every 'Hello World from thread T out of NT from process R
    out of NR' line is numbered consistently with the job geometry.
    Performance: the compilation time recorded by the check.
    """
    matches = sn.findall(
        r'Hello World from thread \s*(\d+) out '
        r'of \s*(\d+) from process \s*(\d+) out of '
        r'\s*(\d+)', self.stdout)

    # Named accessors for the four capture groups keep the deferred
    # assertions below readable.
    def thread_id(m):
        return int(m.group(1))

    def thread_total(m):
        return int(m.group(2))

    def rank_id(m):
        return int(m.group(3))

    def rank_total(m):
        return int(m.group(4))

    self.sanity_patterns = sn.all(
        sn.chain(
            [
                sn.assert_eq(sn.count(matches),
                             self.num_tasks * self.num_cpus_per_task)
            ],
            sn.map(lambda m: sn.assert_lt(thread_id(m), thread_total(m)),
                   matches),
            sn.map(lambda m: sn.assert_lt(rank_id(m), rank_total(m)),
                   matches),
            sn.map(lambda m: sn.assert_lt(thread_id(m),
                                          self.num_cpus_per_task), matches),
            sn.map(lambda m: sn.assert_eq(thread_total(m),
                                          self.num_cpus_per_task), matches),
            sn.map(lambda m: sn.assert_lt(rank_id(m), self.num_tasks),
                   matches),
            sn.map(lambda m: sn.assert_eq(rank_total(m), self.num_tasks),
                   matches),
        ))
    self.perf_patterns = {
        'compilation_time': sn.getattr(self, 'compilation_time_seconds')
    }
    self.reference = {'*': {'compilation_time': (60, None, 0.1)}}
    super().setup(partition, environ, **job_opts)
def __init__(self):
    """RRTMGP OpenACC solver check for the Dom/Daint GPU partitions."""
    super().__init__()
    self.valid_systems = ['dom:gpu', 'daint:gpu']
    self.valid_prog_environs = ['PrgEnv-pgi']
    self.sourcesdir = os.path.join(self.current_system.resourcesdir,
                                   'RRTMGP')
    self.tags = {'external-resources'}
    self.prebuild_cmd = ['cp build/Makefile.conf.dom build/Makefile.conf']
    self.modules = ['craype-accel-nvidia60', 'cray-netcdf']
    self.variables = {'NCOL': '500', 'INIFILE': 'openacc-solvers-lw'}
    self.executable = 'python'
    self.executable_opts = [
        'util/scripts/run_tests.py', '--verbose', '--rel_diff_cut 1e-13',
        '--root ..', '--test ${INIFILE}_ncol-${NCOL}.ini'
    ]
    self.pre_run = [
        'pwd', 'module load netcdf-python/1.4.1-CrayGNU-19.06-python2',
        'cd test'
    ]
    # Every reported difference value must stay below 1e-5, and the
    # pattern must match at least once.
    diffs = sn.extractall(r'.*\[\S+, (\S+)\]', self.stdout, 1, float)
    self.sanity_patterns = sn.all(
        sn.chain(
            [sn.assert_gt(sn.count(diffs), 0, msg='regex not matched')],
            sn.map(lambda d: sn.assert_lt(d, 1e-5), diffs)))
    self.maintainers = ['WS', 'VK']
def __init__(self, variant, lang, linkage):
    """Hello-world compile-and-run check (ubelix), parameterized by
    language and linkage type.
    """
    self.linkage = linkage
    self.variables = {'CRAYPE_LINK_TYPE': linkage}
    self.prgenv_flags = {}
    self.lang_names = {'c': 'C', 'cpp': 'C++', 'f90': 'Fortran 90'}
    self.descr = self.lang_names[lang] + ' Hello World'
    self.sourcepath = 'hello_world'
    self.build_system = 'SingleSource'
    self.valid_systems = ['ubelix:compute', 'ubelix:gpu']
    self.valid_prog_environs = ['foss', 'intel']
    self.compilation_time_seconds = None
    matches = sn.findall(
        r'Hello World from thread \s*(\d+) out '
        r'of \s*(\d+) from process \s*(\d+) out of '
        r'\s*(\d+)', self.stdout)
    n_tasks = sn.getattr(self, 'num_tasks')
    n_cpus = sn.getattr(self, 'num_cpus_per_task')

    def field(i):
        # Accessor factory: capture group *i* of a match, as an int.
        return lambda match: int(match.group(i))

    thread_id, thread_total = field(1), field(2)
    rank_id, rank_total = field(3), field(4)

    self.sanity_patterns = sn.all(
        sn.chain(
            [sn.assert_eq(sn.count(matches), n_tasks * n_cpus)],
            sn.map(lambda m: sn.assert_lt(thread_id(m), thread_total(m)),
                   matches),
            sn.map(lambda m: sn.assert_lt(rank_id(m), rank_total(m)),
                   matches),
            sn.map(lambda m: sn.assert_lt(thread_id(m), n_cpus), matches),
            sn.map(lambda m: sn.assert_eq(thread_total(m), n_cpus), matches),
            sn.map(lambda m: sn.assert_lt(rank_id(m), n_tasks), matches),
            sn.map(lambda m: sn.assert_eq(rank_total(m), n_tasks), matches),
        ))
    self.perf_patterns = {
        'compilation_time': sn.getattr(self, 'compilation_time_seconds')
    }
    self.reference = {'*': {'compilation_time': (60, None, 0.1, 's')}}
    self.maintainers = ['VH', 'EK']
    self.tags = {'production', 'prgenv'}
def __init__(self):
    """OpenFOAM buoyantBoussinesqSimpleFoam check (hotroom tutorial)."""
    super().__init__()
    self.descr = ('OpenFOAM check of buoyantBoussinesqSimpleFoam: '
                  'hotroom tutorial')
    self.executable = 'buoyantBoussinesqSimpleFoam'
    # Every reported global residual must be below 1e-17 and the solver
    # must reach its end marker.
    global_residuals = sn.extractall(r'\sglobal\s=\s(?P<res>\S+),',
                                     self.stdout, 'res', float)
    end_marker = sn.assert_found(r'^\s*[Ee]nd', self.stdout)
    self.sanity_patterns = sn.all(
        sn.chain(
            sn.map(lambda r: sn.assert_lt(r, 1.e-17), global_residuals),
            [end_marker]))
def __init__(self):
    """OpenFOAM pimpleFoam check (tjunction tutorial).

    Sanity: every final epsilon residual must be below 5e-5 and the
    solver must reach its '[Ee]nd' marker.
    """
    super().__init__()
    self.descr = 'OpenFOAM check of pimpleFoam: tjunction tutorial'
    # FIX: the original pattern used unescaped dots ('\d.\d+.'), which
    # match *any* character. The first dot is the literal decimal point
    # of the initial residual and the second is the comma that follows
    # it in the solver log ('Initial residual = 0.123, Final ...'), so
    # make both explicit.
    residual = sn.extractall(r'Solving for epsilon, \w+\s\w+\s=\s\d\.\d+,\s'
                             r'Final residual\s=\s(?P<res>-?\S+),',
                             self.stdout, 'res', float)
    self.sanity_patterns = sn.all(sn.chain(
        sn.map(lambda x: sn.assert_lt(x, 5.e-05), residual),
        [sn.assert_found(r'^\s*[Ee]nd', self.stdout)],
    ))
def __init__(self):
    """OpenFOAM potentialFoam check (motorbike tutorial), 6-way MPI."""
    super().__init__()
    self.descr = 'OpenFOAM check of potentialFoam: motorbike tutorial'
    self.executable_opts = ['-parallel']
    self.num_tasks = 6
    self.num_tasks_per_node = 6
    # Only the last five residuals (the converged tail of the run) are
    # checked against the threshold.
    last_residuals = sn.extractall(r'Final residual = (?P<res>-?\S+),',
                                   self.stdout, 'res', float)[-5:]
    structural_checks = [
        sn.assert_eq(5, sn.count(last_residuals)),
        sn.assert_found('Finalising parallel run', self.stdout),
        sn.assert_found(r'^\s*[Ee]nd', self.stdout)
    ]
    self.sanity_patterns = sn.all(
        sn.chain(
            sn.map(lambda r: sn.assert_lt(r, 1.e-07), last_residuals),
            structural_checks))
def __init__(self):
    """OpenFOAM chtMultiRegionSimpleFoam check (heatexchanger tutorial)."""
    super().__init__()
    self.descr = ('OpenFOAM check of chtMultiRegionSimpleFoam:'
                  ' heatexchanger tutorial')
    self.executable_opts = ['-parallel']
    self.num_tasks = 4
    self.num_tasks_per_node = 4
    # Only the last ten global residuals (end of the run) are checked.
    last_residuals = sn.extractall(r'\sglobal\s=\s(?P<res>\S+),',
                                   self.stdout, 'res', float)[-10:]
    structural_checks = [
        sn.assert_eq(10, sn.count(last_residuals)),
        sn.assert_found('Finalising parallel run', self.stdout),
        sn.assert_found(r'^\s*[Ee]nd', self.stdout)
    ]
    self.sanity_patterns = sn.all(
        sn.chain(
            sn.map(lambda r: sn.assert_lt(r, 1.e-03), last_residuals),
            structural_checks))
def set_sanity(self):
    """Build the sanity check for the HPX hello-world output."""
    greetings = sn.findall(r'hello world from OS-thread \s*(?P<tid>\d+) on '
                           r'locality (?P<lid>\d+)', self.stdout)
    # A 'locality' is an HPX process; see:
    # https://stellar-group.github.io/hpx/docs/sphinx/branches/master/html/terminology.html#term-locality
    n_localities = self.num_tasks // self.num_tasks_per_node
    count_ok = sn.assert_eq(sn.count(greetings),
                            self.num_tasks * self.num_cpus_per_task)
    tid_ok = sn.map(
        lambda m: sn.assert_lt(int(m.group('tid')), self.num_cpus_per_task),
        greetings)
    lid_ok = sn.map(
        lambda m: sn.assert_lt(int(m.group('lid')), n_localities),
        greetings)
    self.sanity_patterns = sn.all(sn.chain([count_ok], tid_ok, lid_ok))
def setup(self, partition, environ, **job_opts):
    """Pick the per-partition job geometry, then validate the CSV-style
    summary line printed by the HPX stencil benchmark.
    """
    result = sn.findall(
        r'(?P<lid>\d+),\s*(?P<tid>\d+),'
        r'\s*(?P<time>(\d+)?.?\d+),'
        r'\s*(?P<pts>\d+),'
        r'\s*(?P<parts>\d+),'
        r'\s*(?P<steps>\d+)', self.stdout)
    # Partition-specific job geometry; the *:mc partitions additionally
    # pin one task per socket. Unknown partitions keep the defaults.
    geometry = {
        'daint:gpu': {'num_tasks': 2, 'num_tasks_per_node': 1,
                      'num_cpus_per_task': 12},
        'daint:mc': {'num_tasks': 4, 'num_tasks_per_node': 2,
                     'num_cpus_per_task': 18, 'num_tasks_per_socket': 1},
        'dom:gpu': {'num_tasks': 2, 'num_tasks_per_node': 1,
                    'num_cpus_per_task': 12},
        'dom:mc': {'num_tasks': 4, 'num_tasks_per_node': 2,
                   'num_cpus_per_task': 18, 'num_tasks_per_socket': 1},
    }
    for attr, value in geometry.get(partition.fullname, {}).items():
        setattr(self, attr, value)

    self.executable_opts += ['--hpx:threads=%s' % self.num_cpus_per_task]
    num_threads = self.num_tasks * self.num_cpus_per_task
    # NOTE(review): the lid field is compared for equality with
    # num_tasks (not num_localities), matching the original behavior.
    self.sanity_patterns = sn.all(
        sn.chain(
            sn.map(lambda m: sn.assert_eq(int(m.group('lid')),
                                          self.num_tasks), result),
            sn.map(lambda m: sn.assert_eq(int(m.group('tid')), num_threads),
                   result),
            sn.map(lambda m: sn.assert_eq(m.group('pts'), self.nx_opts),
                   result),
            sn.map(lambda m: sn.assert_eq(m.group('parts'), self.np_opts),
                   result),
            sn.map(lambda m: sn.assert_eq(m.group('steps'), self.nt_opts),
                   result)))
    super().setup(partition, environ, **job_opts)
def set_sanity(self):
    """Validate the CSV-style summary line of the single-locality run."""
    summary = sn.findall(r'(?P<tid>\d+),\s*(?P<time>(\d+)?.?\d+),'
                         r'\s*(?P<pts>\d+),\s*(?P<parts>\d+),'
                         r'\s*(?P<steps>\d+)', self.stdout)
    # Thread count, grid points, partitions and steps reported by the
    # benchmark must all match the requested configuration.
    expectations = sn.chain(
        sn.map(lambda m: sn.assert_eq(int(m.group('tid')),
                                      self.num_cpus_per_task), summary),
        sn.map(lambda m: sn.assert_eq(m.group('pts'), self.nx_opts),
               summary),
        sn.map(lambda m: sn.assert_eq(m.group('parts'), self.np_opts),
               summary),
        sn.map(lambda m: sn.assert_eq(m.group('steps'), self.nt_opts),
               summary))
    self.sanity_patterns = sn.all(expectations)
def setup(self, partition, environ, **job_opts):
    """Pick per-partition thread counts, then verify every OS-thread
    greeting against the job geometry.
    """
    greetings = sn.findall(
        r'hello world from OS-thread \s*(?P<tid>\d+) on '
        r'locality (?P<lid>\d+)', self.stdout)
    # Two tasks, one per node, on every supported partition; only the
    # threads-per-task count differs between gpu and mc.
    cpus_per_task = {'daint:gpu': 12, 'daint:mc': 36,
                     'dom:gpu': 12, 'dom:mc': 36}
    if partition.fullname in cpus_per_task:
        self.num_tasks = 2
        self.num_tasks_per_node = 1
        self.num_cpus_per_task = cpus_per_task[partition.fullname]

    self.executable_opts = ['--hpx:threads=%s' % self.num_cpus_per_task]
    # A 'locality' is an HPX process; see:
    # https://stellar-group.github.io/hpx/docs/sphinx/branches/master/html/terminology.html#term-locality
    n_localities = self.num_tasks // self.num_tasks_per_node
    count_ok = sn.assert_eq(sn.count(greetings),
                            self.num_tasks * self.num_cpus_per_task)
    tid_ok = sn.map(
        lambda m: sn.assert_lt(int(m.group('tid')), self.num_cpus_per_task),
        greetings)
    lid_ok = sn.map(
        lambda m: sn.assert_lt(int(m.group('lid')), n_localities),
        greetings)
    self.sanity_patterns = sn.all(sn.chain([count_ok], tid_ok, lid_ok))
    super().setup(partition, environ, **job_opts)
def assert_hello_world(self):
    """Return a deferred sanity expression for the hello-world output.

    Checks the total line count and the thread/rank numbering of every
    'Hello, World from thread ...' line.
    """
    matches = sn.findall(
        r'Hello, World from thread \s*(\d+) out '
        r'of \s*(\d+) from process \s*(\d+) out of '
        r'\s*(\d+)', self.stdout)
    n_tasks = sn.getattr(self, 'num_tasks')
    n_cpus = sn.getattr(self, 'num_cpus_per_task')

    def field(i):
        # Accessor factory: capture group *i* of a match, as an int.
        return lambda match: int(match.group(i))

    thread_id, thread_total = field(1), field(2)
    rank_id, rank_total = field(3), field(4)
    return sn.all(
        sn.chain(
            [sn.assert_eq(sn.count(matches), n_tasks * n_cpus)],
            sn.map(lambda m: sn.assert_lt(thread_id(m), thread_total(m)),
                   matches),
            sn.map(lambda m: sn.assert_lt(rank_id(m), rank_total(m)),
                   matches),
            sn.map(lambda m: sn.assert_lt(thread_id(m), n_cpus), matches),
            sn.map(lambda m: sn.assert_eq(thread_total(m), n_cpus), matches),
            sn.map(lambda m: sn.assert_lt(rank_id(m), n_tasks), matches),
            sn.map(lambda m: sn.assert_eq(rank_total(m), n_tasks), matches),
        ))
def eval_sanity(self):
    """Validate the per-task output files produced by a greasy run.

    Checks that every requested greasy task produced an ``output-*``
    file in the stage directory, that each file contains exactly
    ``nranks_per_worker * num_cpus_per_task`` hello-world lines with
    consistent thread/rank numbering, and that the greasy log reports a
    fully successful run.

    Returns ``True`` when every assertion passes; otherwise
    ``sn.evaluate`` raises a sanity error carrying the failure message.
    """
    # FIX: dropped a dead 'output_files = []' that was immediately
    # overwritten by this comprehension.
    output_files = [
        file for file in os.listdir(self.stagedir)
        if file.startswith('output-')
    ]
    num_greasy_tasks = len(output_files)
    failure_msg = (f'Requested {self.num_greasy_tasks} task(s), but '
                   f'executed only {num_greasy_tasks} tasks(s)')
    sn.evaluate(
        sn.assert_eq(num_greasy_tasks, self.num_greasy_tasks,
                     msg=failure_msg))
    num_tasks = sn.getattr(self, 'nranks_per_worker')
    num_cpus_per_task = sn.getattr(self, 'num_cpus_per_task')

    def tid(match):
        # OpenMP thread id (first capture group).
        return int(match.group(1))

    def num_threads(match):
        # Reported thread count (second capture group).
        return int(match.group(2))

    def rank(match):
        # MPI rank id (third capture group).
        return int(match.group(3))

    def num_ranks(match):
        # Reported rank count (fourth capture group).
        return int(match.group(4))

    for output_file in output_files:
        # FIX: os.listdir() returns bare file names; sn.findall must be
        # given the full path inside the stage directory, otherwise it
        # would look for the file relative to the current working
        # directory.
        result = sn.findall(
            r'Hello, World from thread \s*(\d+) out '
            r'of \s*(\d+) from process \s*(\d+) out of '
            r'\s*(\d+)', os.path.join(self.stagedir, output_file))
        failure_msg = (f'Found {sn.count(result)} Hello, World... '
                       f'pattern(s) but expected '
                       f'{num_tasks * num_cpus_per_task} pattern(s) '
                       f'inside the output file {output_file}')
        sn.evaluate(
            sn.assert_eq(sn.count(result),
                         num_tasks * num_cpus_per_task,
                         msg=failure_msg))
        sn.evaluate(
            sn.all(
                sn.chain(
                    sn.map(
                        lambda x: sn.assert_lt(
                            tid(x), num_threads(x),
                            msg=(f'Found {tid(x)} threads rather than '
                                 f'{num_threads(x)}')), result),
                    sn.map(
                        lambda x: sn.assert_lt(
                            rank(x), num_ranks(x),
                            msg=(f'Rank id {rank(x)} is not lower than the '
                                 f'number of ranks {self.nranks_per_worker} '
                                 f'in output file')), result),
                    sn.map(
                        lambda x: sn.assert_lt(
                            tid(x), self.num_cpus_per_task,
                            msg=(f'Rank id {tid(x)} is not lower than the '
                                 f'number of cpus per task '
                                 f'{self.num_cpus_per_task} in output '
                                 f'file {output_file}')), result),
                    sn.map(
                        lambda x: sn.assert_eq(
                            num_threads(x), num_cpus_per_task,
                            msg=(f'Found {num_threads(x)} threads rather than '
                                 f'{self.num_cpus_per_task} in output file '
                                 f'{output_file}')), result),
                    sn.map(
                        lambda x: sn.assert_lt(
                            rank(x), num_tasks,
                            msg=(f'Found {rank(x)} threads rather than '
                                 f'{self.num_cpus_per_task} in output file '
                                 f'{output_file}')), result),
                    sn.map(
                        lambda x: sn.assert_eq(
                            num_ranks(x), num_tasks,
                            msg=(f'Number of ranks {num_ranks(x)} is not '
                                 f'equal to {self.nranks_per_worker} in '
                                 f'output file {output_file}')), result))))
    sn.evaluate(sn.assert_found(r'Finished greasing', self.greasy_logfile))
    sn.evaluate(
        sn.assert_found((f'INFO: Summary of {self.num_greasy_tasks} '
                         f'tasks: '
                         f'{self.num_greasy_tasks} OK, '
                         f'0 FAILED, '
                         f'0 CANCELLED, '
                         fr'0 INVALID\.'), self.greasy_logfile))
    return True
def test_chain(self):
    """sn.chain must behave exactly like itertools.chain."""
    first = ['A', 'B', 'C']
    second = ['D', 'E', 'F']
    deferred_result = evaluate(sn.chain(make_deferrable(first), second))
    expected = itertools.chain(first, second)
    self.assertTrue(all(a == b for a, b in zip(deferred_result, expected)))
def test_chain():
    """sn.chain must mirror itertools.chain element by element."""
    left = ['A', 'B', 'C']
    right = ['D', 'E', 'F']
    got = sn.evaluate(sn.chain(sn.defer(left), right))
    want = itertools.chain(left, right)
    assert all(a == b for a, b in zip(got, want))
def __init__(self, variant, lang, linkage):
    """CSCS hello-world compile-and-run check, parameterized by
    language and linkage type.
    """
    self.linkage = linkage
    self.variables = {'CRAYPE_LINK_TYPE': linkage}
    self.prgenv_flags = {}
    self.lang_names = {'c': 'C', 'cpp': 'C++', 'f90': 'Fortran 90'}
    self.descr = f'{self.lang_names[lang]} Hello World'
    self.sourcepath = 'hello_world'
    self.build_system = 'SingleSource'
    self.valid_systems = [
        'daint:gpu', 'daint:mc', 'dom:gpu', 'dom:mc', 'kesch:cn',
        'tiger:gpu', 'arolla:cn', 'arolla:pn', 'tsa:cn', 'tsa:pn'
    ]
    self.valid_prog_environs = [
        'PrgEnv-cray', 'PrgEnv-cray_classic', 'PrgEnv-intel', 'PrgEnv-gnu',
        'PrgEnv-pgi', 'PrgEnv-gnu-nocuda', 'PrgEnv-pgi-nocuda'
    ]
    if self.current_system.name in ['kesch', 'arolla', 'tsa']:
        self.exclusive_access = True

    # Static linking is not supported on kesch: disable every
    # programming environment for that combination.
    if (self.current_system.name in ['kesch'] and linkage == 'static'):
        self.valid_prog_environs = []

    self.compilation_time_seconds = None
    matches = sn.findall(
        r'Hello World from thread \s*(\d+) out '
        r'of \s*(\d+) from process \s*(\d+) out of '
        r'\s*(\d+)', self.stdout)
    n_tasks = sn.getattr(self, 'num_tasks')
    n_cpus = sn.getattr(self, 'num_cpus_per_task')

    def field(i):
        # Accessor factory: capture group *i* of a match, as an int.
        return lambda match: int(match.group(i))

    thread_id, thread_total = field(1), field(2)
    rank_id, rank_total = field(3), field(4)
    self.sanity_patterns = sn.all(
        sn.chain(
            [sn.assert_eq(sn.count(matches), n_tasks * n_cpus)],
            sn.map(lambda m: sn.assert_lt(thread_id(m), thread_total(m)),
                   matches),
            sn.map(lambda m: sn.assert_lt(rank_id(m), rank_total(m)),
                   matches),
            sn.map(lambda m: sn.assert_lt(thread_id(m), n_cpus), matches),
            sn.map(lambda m: sn.assert_eq(thread_total(m), n_cpus), matches),
            sn.map(lambda m: sn.assert_lt(rank_id(m), n_tasks), matches),
            sn.map(lambda m: sn.assert_eq(rank_total(m), n_tasks), matches),
        ))
    self.perf_patterns = {
        'compilation_time': sn.getattr(self, 'compilation_time_seconds')
    }
    self.reference = {'*': {'compilation_time': (60, None, 0.1, 's')}}
    self.maintainers = ['VH', 'EK']
    self.tags = {'production', 'craype'}