def test_getitem(self):
    """Check ``sn.getitem`` on a list and a dict, including failures."""
    seq = [1, 2, 3]
    mapping = {'a': 1, 'b': 2, 'c': 3}

    # Successful lookups evaluate to the underlying element.
    self.assertEqual(2, sn.getitem(seq, 1))
    self.assertEqual(2, sn.getitem(mapping, 'b'))

    # Invalid accesses surface as SanityError only when evaluated.
    self.assertRaisesRegex(SanityError, 'index out of bounds: 10',
                           evaluate, sn.getitem(seq, 10))
    self.assertRaisesRegex(SanityError, 'key not found: k',
                           evaluate, sn.getitem(mapping, 'k'))
def test_getitem_with_deferrables(self):
    """Check ``sn.getitem`` when the container itself is deferred."""
    seq = make_deferrable([1, 2, 3])
    mapping = make_deferrable({'a': 1, 'b': 2, 'c': 3})

    # Lookups work the same on deferred containers.
    self.assertEqual(2, sn.getitem(seq, 1))
    self.assertEqual(2, sn.getitem(mapping, 'b'))

    # Invalid accesses surface as SanityError only when evaluated.
    self.assertRaisesRegex(SanityError, 'index out of bounds: 10',
                           evaluate, sn.getitem(seq, 10))
    self.assertRaisesRegex(SanityError, 'key not found: k',
                           evaluate, sn.getitem(mapping, 'k'))
def test_getitem():
    """Exercise ``sn.getitem`` over a plain list and dict."""
    seq = [1, 2, 3]
    mapping = {'a': 1, 'b': 2, 'c': 3}

    assert 2 == sn.getitem(seq, 1)
    assert 2 == sn.getitem(mapping, 'b')

    # Errors are raised lazily, at evaluation time.
    with pytest.raises(SanityError, match='index out of bounds: 10'):
        sn.evaluate(sn.getitem(seq, 10))

    with pytest.raises(SanityError, match='key not found: k'):
        sn.evaluate(sn.getitem(mapping, 'k'))
def test_getitem_with_deferrables():
    """Exercise ``sn.getitem`` when the container itself is deferred."""
    seq = sn.defer([1, 2, 3])
    mapping = sn.defer({'a': 1, 'b': 2, 'c': 3})

    assert 2 == sn.getitem(seq, 1)
    assert 2 == sn.getitem(mapping, 'b')

    # Errors are raised lazily, at evaluation time.
    with pytest.raises(SanityError, match='index out of bounds: 10'):
        sn.evaluate(sn.getitem(seq, 10))

    with pytest.raises(SanityError, match='key not found: k'):
        sn.evaluate(sn.getitem(mapping, 'k'))
def __init__(self, prg_envs):
    """Configure a SPEC ACCEL v1.2 run for the given programming environments.

    :param prg_envs: list of programming environment names; each must be a
        key of ``self.configs`` and of the externally defined
        ``self.benchmarks`` / ``self.exec_times`` tables.
    """
    self.valid_systems = ['daint:gpu', 'dom:gpu']
    self.valid_prog_environs = prg_envs
    self.modules = ['craype-accel-nvidia60']
    # Maps each PrgEnv to the SPEC config file shipped with the resources.
    self.configs = {
        'PrgEnv-gnu': 'cscs-gnu',
        'PrgEnv-cray': 'cscs-cray',
        'PrgEnv-pgi': 'cscs-pgi',
    }
    # The SPEC distribution is staged from the system resources directory
    # and installed into the stage directory before building.
    app_source = os.path.join(self.current_system.resourcesdir,
                              'SPEC_ACCELv1.2')
    self.prebuild_cmd = [
        'cp -r %s/* .' % app_source,
        './install.sh -d . -f'
    ]
    # HACK: only prebuild_cmd is needed, but a build system with no build
    # action is not supported, so compile one trivial source file as a
    # do-nothing build step.
    self.build_system = 'SingleSource'
    self.sourcepath = './benchspec/ACCEL/353.clvrleaf/src/timer_c.c'
    self.build_system.cflags = ['-c']
    # Per-environment runtime references: 10% tolerance on the expected
    # seconds for each benchmark (benchmarks/exec_times defined elsewhere).
    self.refs = {
        env: {
            bench_name: (rt, None, 0.1, 'Seconds')
            for (bench_name, rt) in zip(self.benchmarks[env],
                                        self.exec_times[env])
        }
        for env in self.valid_prog_environs
    }
    self.num_tasks = 1
    self.num_tasks_per_node = 1
    self.time_limit = (0, 30, 0)
    self.executable = 'runspec'

    # The SPEC log file name is not fixed; resolve it lazily via glob.
    outfile = sn.getitem(sn.glob('result/ACCEL.*.log'), 0)
    # Per-environment sanity: every benchmark must log a 'Success' line.
    self.sanity_patterns_ = {
        env: sn.all([
            sn.assert_found(r'Success.*%s' % bn, outfile)
            for bn in self.benchmarks[env]
        ])
        for env in self.valid_prog_environs
    }
    # Per-environment performance: average runtime over all iterations.
    self.perf_patterns_ = {
        env: {
            bench_name: sn.avg(
                sn.extractall(
                    r'Success.*%s.*runtime=(?P<rt>[0-9.]+)' % bench_name,
                    outfile, 'rt', float))
            for bench_name in self.benchmarks[env]
        }
        for env in self.valid_prog_environs
    }
    self.maintainers = ['SK']
    self.tags = {'diagnostic', 'external-resources'}
def __init__(self):
    """Configure the HPCG reference benchmark run."""
    self.descr = 'HPCG reference benchmark'
    self.valid_systems = ['cannon:test', 'fasse:fasse', 'test:rc-testing']
    self.valid_prog_environs = ['gnu-mpi']

    # Build straight from the upstream sources.
    self.sourcesdir = 'https://github.com/hpcg-benchmark/hpcg.git'
    self.build_system = 'Make'
    self.build_system.options = ['arch=MPI_GCC_OMP']

    self.executable = 'bin/xhpcg'
    self.executable_opts = ['--nx=104', '--ny=104', '--nz=104', '-t2']
    # use glob to catch the output file suffix dependent on execution time
    self.output_file = sn.getitem(sn.glob('HPCG*.txt'), 0)

    self.num_tasks = 96
    self.num_cpus_per_task = 1
    self.system_num_tasks = {
        'cannon:test': 48,
        'fasse:fasse': 48,
        'test:rc-testing': 32,
        '*': 32
    }

    # cannon and fasse share the same performance target.
    cluster_target = (28, -0.1, None, 'Gflop/s')
    self.reference = {
        'cannon:test': {'gflops': cluster_target},
        'fasse:fasse': {'gflops': cluster_target},
        '*': {'gflops': (13.4, None, None, 'Gflop/s')}
    }
def __init__(self):
    """Configure the HPCG reference benchmark for the Cray systems."""
    super().__init__()
    self.descr = 'HPCG reference benchmark'
    self.valid_systems = ['daint:mc', 'daint:gpu', 'dom:gpu', 'dom:mc',
                          'tiger:gpu']
    self.valid_prog_environs = ['PrgEnv-gnu']
    # Huge pages are loaded only on the Cray XC systems.
    if self.current_system.name in ('daint', 'dom'):
        self.modules = ['craype-hugepages8M']

    # Build straight from the upstream sources.
    self.sourcesdir = 'https://github.com/hpcg-benchmark/hpcg.git'
    self.build_system = 'Make'
    self.build_system.options = ['arch=MPI_GCC_OMP']

    self.executable = 'bin/xhpcg'
    self.executable_opts = ['--nx=104', '--ny=104', '--nz=104', '-t2']
    # use glob to catch the output file suffix dependent on execution time
    self.output_file = sn.getitem(sn.glob('HPCG*.txt'), 0)

    self.num_tasks = 0  # flexible task count, filled at submission time
    self.num_cpus_per_task = 1
    self.system_num_tasks = {
        'daint:mc': 36,
        'daint:gpu': 12,
        'dom:mc': 36,
        'dom:gpu': 12,
    }

    # gpu and mc partitions have distinct targets, shared across systems.
    gpu_target = (7.6, -0.1, None, 'Gflop/s')
    mc_target = (13.4, -0.1, None, 'Gflop/s')
    self.reference = {
        'daint:gpu': {'gflops': gpu_target},
        'daint:mc': {'gflops': mc_target},
        'dom:gpu': {'gflops': gpu_target},
        'dom:mc': {'gflops': mc_target},
        '*': {'gflops': (0, None, None, 'Gflop/s')}
    }
    self.maintainers = ['SK', 'EK']
    self.tags = {'diagnostic', 'benchmark', 'craype', 'external-resources'}
def __init__(self):
    """Configure the GPU-accelerated HPCG check (prebuilt binary)."""
    super().__init__()
    self.maintainers = ['SK', 'VK']
    self.descr = 'HPCG benchmark on GPUs'
    # The prebuilt binary is staged from the system resources directory.
    self.sourcesdir = os.path.join(self.current_system.resourcesdir,
                                   'HPCG')
    # there's no binary with support for CUDA 10 yet
    self.valid_systems = ['daint:gpu']
    self.valid_prog_environs = ['PrgEnv-gnu']
    self.modules = ['craype-accel-nvidia60', 'craype-hugepages8M']
    self.executable = 'xhpcg_gpu_3.1'
    # Staged binaries may lose their execute bit; restore it before running.
    self.pre_run = ['chmod +x %s' % self.executable]
    self.num_tasks = 0  # flexible task count, filled at submission time
    self.num_tasks_per_node = 1
    self.num_cpus_per_task = 12
    self.variables = {
        'PMI_NO_FORK': '1',
        'MPICH_USE_DMAPP_COLL': '1',
        'OMP_SCHEDULE': 'static',
        'OMP_NUM_THREADS': str(self.num_cpus_per_task),
        'HUGETLB_VERBOSE': '0',
        'HUGETLB_DEFAULT_PAGE_SIZE': '8M',
    }
    # The YAML report name is not fixed; resolve it lazily via glob.
    self.output_file = sn.getitem(sn.glob('*.yaml'), 0)
    self.reference = {
        'daint:gpu': {
            'gflops': (94.7, -0.1, None, 'Gflop/s')
        },
        'dom:gpu': {
            'gflops': (94.7, -0.1, None, 'Gflop/s')
        },
    }
    # Deferred expression: resolves to the node count once the job runs.
    num_nodes = self.num_tasks_assigned / self.num_tasks_per_node
    # Performance is reported per node.
    self.perf_patterns = {
        'gflops': sn.extractsingle(
            r'HPCG result is VALID with a GFLOP\/s rating of:\s*'
            r'(?P<perf>\S+)', self.output_file, 'perf', float) / num_nodes
    }
    # All four verification sections must PASS and the assigned tasks
    # must fill whole nodes.
    self.sanity_patterns = sn.all([
        sn.assert_eq(4, sn.count(sn.findall(r'PASSED', self.output_file))),
        sn.assert_eq(0, self.num_tasks_assigned % self.num_tasks_per_node)
    ])
def __init__(self, num_tasks):
    """HPCG acceptance check for monch, parametrized by node count.

    :param num_tasks: number of nodes (one task per node); must be one
        of 2, 4, 6 or 8, otherwise a ``KeyError`` is raised here.
    """
    super().__init__()
    self.tags = {'monch_acceptance'}
    self.descr = 'HPCG monch acceptance check'
    self.maintainers = ['VK']
    self.valid_systems = ['monch:compute']
    self.valid_prog_environs = ['PrgEnv-gnu']
    self.sourcesdir = os.path.join(self.current_system.resourcesdir,
                                   'HPCG-CPU')
    self.prebuild_cmd = ['. configure MPI_GCC_OMP']
    self.executable = './bin/xhpcg'
    self.num_tasks = num_tasks
    self.num_tasks_per_node = 1
    self.num_cpus_per_task = 20
    self.variables = {
        'MV2_ENABLE_AFFINITY': '0',
        'OMP_NUM_THREADS': str(self.num_cpus_per_task),
    }

    # The report file name is timestamped; resolve it lazily via glob.
    report = sn.getitem(sn.glob('HPCG-Benchmark_*.txt'), 0)
    self.sanity_patterns = sn.assert_eq(
        4, sn.count(sn.findall(r'PASSED', report)))

    # Expected GFLOP/s per node count, with a 10% lower tolerance.
    gflops_by_nodes = {2: 2.20716, 4: 4.28179, 6: 6.18806, 8: 8.16107}
    self.reference = {
        'monch:compute': {
            'perf': (gflops_by_nodes[num_tasks], -0.10, None)
        }
    }
    self.perf_patterns = {
        'perf': sn.extractsingle(
            r'HPCG result is VALID with a GFLOP\/s rating of=\s*'
            r'(?P<perf>\S+)', report, 'perf', float)
    }
def __init__(self):
    """HPCG reference benchmark built from the upstream git repository."""
    super().__init__()
    self.descr = 'HPCG reference benchmark'
    self.valid_systems = ['epydia:remote', 'intelinx:remote']
    self.valid_prog_environs = ['Prg-gnu']

    # Clone the sources and stage the site-specific make configuration.
    self.prebuild_cmd = [
        'git clone https://github.com/hpcg-benchmark/hpcg.git',
        'cp Make.cartesius_MPI hpcg/setup/',
        'cd hpcg'
    ]
    self.build_system = 'Make'
    self.build_system.flags_from_environ = False
    self.build_system.options = ['arch=cartesius_MPI']

    self.executable = 'hpcg/bin/xhpcg'
    self.executable_opts = ['--nx=104', '--ny=104', '--nz=104', '--rt=600']
    # use glob to catch the output file suffix dependent on execution time
    self.output_file = sn.getitem(sn.glob('HPCG*.txt'), 0)
    self.num_tasks = 12
    self.num_cpus_per_task = 1
    self.time_limit = (0, 30, 0)

    self.reference = {
        'cartesius:normal-haswell': {
            'gflops': (14, -0.1, 0.1, 'GFLOPs/s')
        },
        '*': {
            'gflops': (0.0, None, None, 'GFLOPs/s')
        }
    }
    self.sanity_patterns = sn.assert_found(r'PASSED', self.output_file)
    self.perf_patterns = {
        'gflops': sn.extractsingle(
            r'HPCG result is VALID with a GFLOP\/s rating of=\s*'
            r'(?P<perf>\S+)', self.output_file, 'perf', float)
    }
    self.maintainers = ['Sagar Dolas']
    self.tags = {'diagnostic', 'benchmark'}
def __init__(self):
    """Large-scale HPCG GPU check on Piz Daint (prebuilt binary)."""
    super().__init__()
    self.maintainers = ['VK']
    self.descr = 'HPCG check'
    self.sourcesdir = os.path.join(self.current_system.resourcesdir,
                                   'HPCG')
    self.valid_systems = ['daint:gpu']
    self.valid_prog_environs = ['PrgEnv-gnu']
    self.modules = ['craype-accel-nvidia60', 'craype-hugepages8M']
    self.executable = 'xhpcg_gpu_3.1'
    self.num_tasks = 5304
    self.num_tasks_per_node = 1
    self.num_cpus_per_task = 12
    self.variables = {
        'PMI_NO_FORK': '1',
        'MPICH_USE_DMAPP_COLL': '1',
        'OMP_SCHEDULE': 'static',
        'OMP_NUM_THREADS': str(self.num_cpus_per_task),
        'HUGETLB_VERBOSE': '0',
        'HUGETLB_DEFAULT_PAGE_SIZE': '8M',
    }

    # The YAML report name is timestamped; resolve it lazily via glob.
    report = sn.getitem(sn.glob('*.yaml'), 0)
    self.sanity_patterns = sn.assert_eq(
        4, sn.count(sn.findall(r'PASSED', report)))
    self.perf_patterns = {
        'perf': sn.extractsingle(
            r'HPCG result is VALID with a GFLOP\/s rating of:\s*'
            r'(?P<perf>\S+)', report, 'perf', float)
    }
    self.reference = {
        'daint:gpu': {
            'perf': (476744, -0.10, None)
        },
    }
def outfile_lazy(self):
    """Return a deferred handle on the run's YAML report file.

    The file name encodes the problem size, the task count and the
    thread count; the trailing glob covers the run-dependent suffix.
    """
    # NOTE(review): assumes the three fields are integers, matching the
    # original '%d' conversions — confirm against the callers.
    pattern = (f'n{self.problem_size}-{self.job.num_tasks}p-'
               f'{self.num_cpus_per_task}t-*.yaml')
    return sn.getitem(sn.glob(pattern), 0)
class HPCGCheckRef(rfm.RegressionTest, HPCGHookMixin):
    """HPCG reference benchmark, built from the upstream sources."""

    descr = 'HPCG reference benchmark'
    valid_systems = ['daint:mc', 'daint:gpu', 'dom:gpu', 'dom:mc']
    valid_prog_environs = ['PrgEnv-gnu']
    build_system = 'Make'
    sourcesdir = 'https://github.com/hpcg-benchmark/hpcg.git'
    executable = 'bin/xhpcg'
    executable_opts = ['--nx=104', '--ny=104', '--nz=104', '-t2']
    # use glob to catch the output file suffix dependent on execution time
    output_file = sn.getitem(sn.glob('HPCG*.txt'), 0)
    # num_tasks == 0: flexible task count, decided at submission time
    num_tasks = 0
    num_cpus_per_task = 1
    reference = {
        'daint:gpu': {
            'gflops': (7.6, -0.1, None, 'Gflop/s')
        },
        'daint:mc': {
            'gflops': (13.4, -0.1, None, 'Gflop/s')
        },
        'dom:gpu': {
            'gflops': (7.6, -0.1, None, 'Gflop/s')
        },
        'dom:mc': {
            'gflops': (13.4, -0.1, None, 'Gflop/s')
        }
    }
    maintainers = ['SK', 'EK']
    tags = {'diagnostic', 'benchmark', 'craype', 'external-resources'}

    @run_after('init')
    def set_modules(self):
        """Load huge pages on the Cray systems only."""
        if self.current_system.name in {'daint', 'dom'}:
            self.modules = ['craype-hugepages8M']

    @run_before('compile')
    def set_build_opts(self):
        """Select the MPI + GCC + OpenMP build architecture."""
        self.build_system.options = ['arch=MPI_GCC_OMP']

    @property
    @deferrable
    def num_tasks_assigned(self):
        """Number of tasks actually assigned by the scheduler (deferred)."""
        return self.job.num_tasks

    @run_before('compile')
    def set_tasks(self):
        """Run one task per core, falling back to one per node if the
        partition's core count is not configured."""
        if self.current_partition.processor.num_cores:
            self.num_tasks_per_node = (
                self.current_partition.processor.num_cores)
        else:
            self.num_tasks_per_node = 1

    @performance_function('Gflop/s')
    def gflops(self):
        """Report the per-node GFLOP/s rating from the output file."""
        num_nodes = self.num_tasks_assigned // self.num_tasks_per_node
        return (sn.extractsingle(
            r'HPCG result is VALID with a GFLOP\/s rating of=\s*'
            r'(?P<perf>\S+)', self.output_file, 'perf', float) / num_nodes)

    @sanity_function
    def validate_passed(self):
        """All four verification sections must report PASSED and the
        assigned tasks must fill whole nodes."""
        return sn.all([
            sn.assert_eq(4, sn.count(sn.findall(r'PASSED',
                                                self.output_file))),
            sn.assert_eq(0, self.num_tasks_assigned % self.num_tasks_per_node)
        ])
class HPCG_GPUCheck(rfm.RunOnlyRegressionTest, HPCGHookMixin):
    """HPCG benchmark run with a prebuilt GPU binary."""

    descr = 'HPCG benchmark on GPUs'
    # there's no binary with support for CUDA 10 yet
    valid_systems = []
    valid_prog_environs = ['PrgEnv-gnu']
    modules = ['craype-accel-nvidia60', 'craype-hugepages8M']
    executable = 'xhpcg_gpu_3.1'
    # num_tasks == 0: flexible task count, decided at submission time
    num_tasks = 0
    num_tasks_per_node = 1
    # the YAML report name is not fixed; resolve it lazily via glob
    output_file = sn.getitem(sn.glob('*.yaml'), 0)
    reference = {
        'daint:gpu': {
            'gflops': (94.7, -0.1, None, 'Gflop/s')
        },
        'dom:gpu': {
            'gflops': (94.7, -0.1, None, 'Gflop/s')
        },
    }
    maintainers = ['SK', 'VH']

    @run_after('setup')
    def set_num_tasks(self):
        """Use all cores of the partition for the OpenMP threads.

        Skips the test when the partition's core count is not configured.
        """
        if self.current_partition.processor.num_cores:
            self.num_cpus_per_task = (
                self.current_partition.processor.num_cores)
        else:
            self.skip(msg='number of cores is not set in the configuration')

        # BUGFIX: refresh OMP_NUM_THREADS now that num_cpus_per_task is
        # known. `set_variables()` runs at init time, before this hook,
        # so it captured the pre-setup default and would otherwise export
        # a stale thread count to the job environment.
        self.variables['OMP_NUM_THREADS'] = str(self.num_cpus_per_task)

    @run_after('init')
    def set_sourcedir(self):
        """Stage the prebuilt binary from the system resources directory."""
        self.sourcesdir = os.path.join(self.current_system.resourcesdir,
                                       'HPCG')

    @run_after('init')
    def set_variables(self):
        """Set the runtime environment (huge pages, OpenMP scheduling)."""
        # OMP_NUM_THREADS is finalized in set_num_tasks() after setup,
        # once the partition's core count is known.
        self.variables = {
            'PMI_NO_FORK': '1',
            'MPICH_USE_DMAPP_COLL': '1',
            'OMP_SCHEDULE': 'static',
            'OMP_NUM_THREADS': str(self.num_cpus_per_task),
            'HUGETLB_VERBOSE': '0',
            'HUGETLB_DEFAULT_PAGE_SIZE': '8M',
        }

    @run_before('run')
    def set_exec_permissions(self):
        """The staged binary may lose its execute bit; restore it."""
        self.prerun_cmds = ['chmod +x %s' % self.executable]

    @sanity_function
    def validate_passed(self):
        """All four verification sections must report PASSED and the
        assigned tasks must fill whole nodes."""
        return sn.all([
            sn.assert_eq(4, sn.count(sn.findall(r'PASSED',
                                                self.output_file))),
            sn.assert_eq(0, self.num_tasks_assigned % self.num_tasks_per_node)
        ])

    @performance_function('Gflop/s')
    def gflops(self):
        """Report the per-node GFLOP/s rating from the YAML report."""
        num_nodes = self.num_tasks_assigned // self.num_tasks_per_node
        return (sn.extractsingle(
            r'HPCG result is VALID with a GFLOP\/s rating of:\s*'
            r'(?P<perf>\S+)', self.output_file, 'perf', float) / num_nodes)

    @property
    @deferrable
    def num_tasks_assigned(self):
        """Number of tasks actually assigned by the scheduler (deferred)."""
        return self.job.num_tasks
def outfile_lazy(self):
    """Return a deferred handle on the run's report file.

    The file name encodes the problem size, the task count and the
    thread count; the glob tail matches the run-dependent suffix and
    extension.
    """
    pattern = 'n{}-{}p-{}t*.*'.format(
        self.problem_size, self.job.num_tasks, self.num_cpus_per_task)
    return sn.getitem(sn.glob(pattern), 0)