Example #1
0
 def libmkl_resolve(self):
     '''Check which MKL library the linker resolved.

     Parses the ``(NEEDED)`` entries in stdout and requires the resolved
     library to be the Intel LP64 variant (``libmkl_intel_lp64.so``).
     '''
     needed_so = (r'.*\(NEEDED\).*libmkl_(?P<prgenv>[A-Za-z]+)_(?P<version>\S+)'
                  r'\.so')
     resolved_prgenv = sn.extractsingle(needed_so, self.stdout, 'prgenv')
     resolved_version = sn.extractsingle(needed_so, self.stdout, 'version')
     checks = [
         sn.assert_eq(resolved_prgenv, 'intel'),
         sn.assert_eq(resolved_version, 'lp64'),
     ]
     return sn.all(checks)
Example #2
0
 def test_assert_eq(self):
     # assert_eq is truthy when the operands compare equal; note that
     # 1 == True in Python, so the second case also passes.
     self.assertTrue(sn.assert_eq(1, 1))
     self.assertTrue(sn.assert_eq(1, True))
     # On mismatch, evaluating the deferred assertion raises SanityError
     # with a default '<a> != <b>' message ...
     self.assertRaisesRegex(SanityError, '1 != 2', evaluate,
                            sn.assert_eq(1, 2))
     self.assertRaisesRegex(SanityError, '1 != False', evaluate,
                            sn.assert_eq(1, False))
     # ... or with a caller-supplied format string whose placeholders
     # receive the two operands.
     self.assertRaisesRegex(SanityError, '1 is not equals to 2', evaluate,
                            sn.assert_eq(1, 2, '{0} is not equals to {1}'))
Example #3
0
 def validate_passed(self):
     '''Combined sanity check for the benchmark run.

     Requires that no process-factorization error was reported, that
     exactly four ``PASSED`` lines appear in the output, and that the
     assigned tasks divide evenly across the nodes.
     '''
     no_factorization_error = sn.assert_not_found(
         r'invalid because the ratio',
         self.outfile_lazy,
         msg='number of processes assigned could not be factorized')
     four_passed = sn.assert_eq(
         4, sn.count(sn.findall(r'PASSED', self.outfile_lazy)))
     tasks_divide_evenly = sn.assert_eq(
         0, self.num_tasks_assigned % self.num_tasks_per_node)
     return sn.all([no_factorization_error, four_passed,
                    tasks_divide_evenly])
Example #4
0
def test_assert_eq():
    '''sn.assert_eq is truthy on equality and raises SanityError on
    mismatch, with either the default or a custom-format message.'''
    assert sn.assert_eq(1, 1)
    assert sn.assert_eq(1, True)  # 1 == True in Python
    with pytest.raises(SanityError, match='1 != 2'):
        sn.evaluate(sn.assert_eq(1, 2))

    with pytest.raises(SanityError, match='1 != False'):
        sn.evaluate(sn.assert_eq(1, False))

    # Custom failure message: a format string taking both operands.
    with pytest.raises(SanityError, match='1 is not equals to 2'):
        sn.evaluate(sn.assert_eq(1, 2, '{0} is not equals to {1}'))
Example #5
0
 def assert_count_gpus(self):
     '''Check GPU detection and kernel-latency reporting counts.

     Every assigned task must print one "Found N gpu(s)" line, and one
     kernel-launch-latency line per GPU per task must be present.
     '''
     gpu_detect_count = sn.count(
         sn.findall(r'\[\S+\] Found \d+ gpu\(s\)', self.stdout))
     latency_count = sn.count(
         sn.findall(r'\[\S+\] \[gpu \d+\] Kernel launch '
                    r'latency: \S+ us', self.stdout))
     expected_latency = (self.num_tasks_assigned *
                         self.num_gpus_per_node)
     return sn.all([
         sn.assert_eq(gpu_detect_count, self.num_tasks_assigned),
         sn.assert_eq(latency_count, expected_latency),
     ])
Example #6
0
 def set_sanity(self):
     '''Build the sanity check for the MPI_T report.

     Reads the CRAY MPICH (ANL base) version from the report file, picks
     the matching pair of reference files, then compares the sorted lists
     of MPI_T control variables and categories extracted at runtime
     against the references.
     '''
     # {{{ 0/ MPICH version:
     # MPI VERSION    : CRAY MPICH version 7.7.15 (ANL base 3.2)
     # MPI VERSION    : CRAY MPICH version 8.0.16.17 (ANL base 3.3)
     regex = r'^MPI VERSION\s+: CRAY MPICH version \S+ \(ANL base (\S+)\)'
     rpt_file = os.path.join(self.stagedir, self.rpt)
     mpich_version = sn.extractsingle(regex, rpt_file, 1)
     # Reference files keyed by the ANL base version found above.
     reference_files = {
         '3.2': {
             'control': 'mpit_control_vars_32.ref',
             'categories': 'mpit_categories_32.ref',
         },
         '3.3': {
             'control': 'mpit_control_vars_33.ref',
             'categories': 'mpit_categories_33.ref',
         }
     }
     # }}}
     # {{{ 1/ MPI Control Variables: MPIR_...
     # --- extract reference data:
     regex = r'^(?P<vars>MPIR\S+)$'
     ref_file = os.path.join(
         self.stagedir,
         # mpich_version is deferred; evaluate it eagerly to index the dict
         reference_files[sn.evaluate(mpich_version)]['control']
     )
     self.ref_control_vars = sorted(sn.extractall(regex, ref_file, 'vars'))
     # --- extract runtime data:
     regex = r'^\t(?P<vars>MPIR\S+)\t'
     self.run_control_vars = sorted(sn.extractall(regex, rpt_file, 'vars'))
     # --- debug with: grep -P '\tMPIR+\S*\t' rpt |awk '{print $1}' |sort
     # }}}
     # {{{ 2/ MPI Category:
     # --- extract reference data:
     regex = r'^(?P<category>.*)$'
     ref = os.path.join(
         self.stagedir,
         reference_files[sn.evaluate(mpich_version)]['categories']
     )
     ref_cat_vars = sorted(sn.extractall(regex, ref, 'category'))
     # Drop empty lines picked up by the catch-all pattern above.
     self.ref_cat_vars = list(filter(None, ref_cat_vars))
     # --- extract runtime data:
     regex = (r'^(?P<category>Category \w+ has \d+ control variables, \d+'
              r' performance variables, \d+ subcategories)')
     rpt = os.path.join(self.stagedir, self.rpt)
     self.run_cat_vars = sorted(sn.extractall(regex, rpt, 'category'))
     # }}}
     # {{{ 3/ Extracted lists can be compared (when sorted):
     self.sanity_patterns = sn.all([
         sn.assert_eq(self.ref_control_vars, self.run_control_vars,
                      msg='sanity1 "mpit_control_vars.ref" failed'),
         sn.assert_eq(self.ref_cat_vars, self.run_cat_vars,
                      msg='sanity2 "mpit_categories.ref" failed'),
     ])
Example #7
0
    def do_sanity_check(self):
        '''Check that every node found the expected number of GPUs and
        completed the pointer chase.

        Returns ``True`` on success; a failing check raises ``SanityError``
        eagerly via ``sn.evaluate``.
        '''
        # Check that every node has the right number of GPUs.
        # Store these nodes in case they're used later by the perf functions.
        self.my_nodes = set(sn.extractall(
            rf'^\s*\[([^\]]*)\]\s*Found {self.num_gpus_per_node} device\(s\).',
            self.stdout, 1))

        # Check that every node has made it to the end.
        nodes_at_end = len(set(sn.extractall(
            r'^\s*\[([^\]]*)\]\s*Pointer chase complete.',
            self.stdout, 1)))

        # FIX: the original nested two assert_eq calls inside an outer
        # assert_eq, i.e. it compared the two True results for equality.
        # sn.all expresses the intended conjunction and behaves identically:
        # True on success, SanityError raised by the failing inner assertion.
        return sn.evaluate(sn.all([
            sn.assert_eq(self.job.num_tasks, len(self.my_nodes)),
            sn.assert_eq(self.job.num_tasks, nodes_at_end)
        ]))
Example #8
0
 def assert_count_gpus(self):
     '''Assert GPU count is consistent.

     One "Found N gpu(s)" line is required per job task, and one
     kernel-launch-latency line per GPU per task.
     '''
     gpu_detect_count = sn.count(
         sn.findall(r'\[\S+\] Found \d+ gpu\(s\)', self.stdout))
     latency_count = sn.count(
         sn.findall(r'\[\S+\] \[gpu \d+\] Kernel launch '
                    r'latency: \S+ us', self.stdout))
     checks = [
         sn.assert_eq(gpu_detect_count,
                      sn.getattr(self.job, 'num_tasks')),
         sn.assert_eq(latency_count,
                      self.job.num_tasks * self.num_gpus_per_node),
     ]
     return sn.all(checks)
Example #9
0
    def setup(self, partition, environ, **job_opts):
        '''Set up sanity/perf patterns and compiler flags, then defer to the
        base-class setup.

        Sanity parses every "Hello World from thread T out of NT from
        process P out of NP" line and checks both the total line count and
        the per-line thread/process bounds against the test's task and CPU
        configuration.
        '''
        result = sn.findall(r'Hello World from thread \s*(\d+) out '
                            r'of \s*(\d+) from process \s*(\d+) out of '
                            r'\s*(\d+)', self.stdout)

        self.sanity_patterns = sn.all(
            # One greeting per thread of every task, plus per-match checks:
            # group(1)=thread id, group(2)=thread count,
            # group(3)=process id, group(4)=process count.
            sn.chain([sn.assert_eq(sn.count(result), self.num_tasks *
                                   self.num_cpus_per_task)],
                     sn.map(
                         lambda x: sn.assert_lt(int(x.group(1)),
                                                int(x.group(2))),
                         result),
                     sn.map(
                         lambda x: sn.assert_lt(int(x.group(3)),
                                                int(x.group(4))),
                         result),
                     sn.map(
                         lambda x: sn.assert_lt(int(x.group(1)),
                                                self.num_cpus_per_task),
                         result),
                     sn.map(
                         lambda x: sn.assert_eq(int(x.group(2)),
                                                self.num_cpus_per_task),
                         result),
                     sn.map(
                         lambda x: sn.assert_lt(int(x.group(3)),
                                                self.num_tasks),
                         result),
                     sn.map(
                         lambda x: sn.assert_eq(int(x.group(4)),
                                                self.num_tasks),
                         result),
                     )
        )

        # Performance metric: compilation time, measured elsewhere and
        # exposed as an attribute on this test.
        self.perf_patterns = {
            'compilation_time': sn.getattr(self, 'compilation_time_seconds')
        }
        self.reference = {
            '*': {
                'compilation_time': (60, None, 0.1)
            }
        }

        # The -nompi environments share flags with their MPI counterparts.
        envname = environ.name.replace('-nompi', '')
        prgenv_flags = self.prgenv_flags[envname]
        self.build_system.cflags = prgenv_flags
        self.build_system.cxxflags = prgenv_flags
        self.build_system.fflags = prgenv_flags
        super().setup(partition, environ, **job_opts)
def test_sanity_multiple_patterns(dummytest, sanity_file, dummy_gpu_exec_ctx):
    '''Sanity checking passes when the required number of pattern matches is
    present and raises SanityError when more matches are required.'''
    sanity_file.write_text('result1 = success\n' 'result2 = success\n')

    # Simulate a pure sanity test; reset the perf_patterns
    dummytest.perf_patterns = None
    # Exactly two matching lines were written above, so this must pass.
    dummytest.sanity_patterns = sn.assert_eq(
        sn.count(sn.findall(r'result\d = success', sanity_file)), 2)
    _run_sanity(dummytest, *dummy_gpu_exec_ctx, skip_perf=True)

    # Require more patterns to be present; only two exist, so this fails.
    dummytest.sanity_patterns = sn.assert_eq(
        sn.count(sn.findall(r'result\d = success', sanity_file)), 3)
    with pytest.raises(SanityError):
        _run_sanity(dummytest, *dummy_gpu_exec_ctx, skip_perf=True)
Example #11
0
    def do_sanity_check(self):
        '''Check that every node has the right number of GPUs.

        Also verifies that every node completed the pointer chase.  Returns
        a deferred expression that evaluates to ``True`` on success and
        raises ``SanityError`` on the first failing check.
        '''
        my_nodes = set(sn.extractall(
            rf'^\s*\[([^\]]*)\]\s*Found {self.num_gpus_per_node} device\(s\).',
            self.stdout, 1))

        # Check that every node has made it to the end.
        nodes_at_end = len(set(sn.extractall(
            r'^\s*\[([^\]]*)\]\s*Pointer chase complete.',
            self.stdout, 1)))

        # FIX: the original wrapped the two assertions in an outer
        # assert_eq, comparing their True results.  sn.all is the intended
        # conjunction and is behaviorally equivalent: True when both pass,
        # SanityError propagated from the failing inner assertion otherwise.
        return sn.all([
            sn.assert_eq(self.job.num_tasks, sn.count(my_nodes)),
            sn.assert_eq(self.job.num_tasks, nodes_at_end)
        ])
def test_hellocheck_local_prepost_run(hellotest, local_exec_ctx):
    '''Prerun/postrun commands must execute inside the test's stage
    directory: their `pwd` output is compared against the stage dir.'''
    @sn.deferrable
    def stagedir(test):
        # Deferred so the stage directory is read at sanity-evaluation time,
        # after the test has been set up.
        return test.stagedir

    # Test also the prerun/postrun functionality
    hellotest.prerun_cmds = ['echo prerun: `pwd`']
    hellotest.postrun_cmds = ['echo postrun: `pwd`']
    pre_run_path = sn.extractsingle(r'^prerun: (\S+)', hellotest.stdout, 1)
    post_run_path = sn.extractsingle(r'^postrun: (\S+)', hellotest.stdout, 1)
    hellotest.sanity_patterns = sn.all([
        sn.assert_eq(stagedir(hellotest), pre_run_path),
        sn.assert_eq(stagedir(hellotest), post_run_path),
    ])
    _run(hellotest, *local_exec_ctx)
Example #13
0
    def __init__(self):
        '''Configure the HPCG GPU benchmark check.

        Runs the prebuilt xhpcg_gpu_3.1 binary, one task per node, and
        extracts the GFLOP/s rating normalized per node.
        '''
        super().__init__()
        self.maintainers = ['SK', 'VK']
        self.descr = 'HPCG benchmark on GPUs'
        self.sourcesdir = os.path.join(self.current_system.resourcesdir,
                                       'HPCG')

        # there's no binary with support for CUDA 10 yet
        self.valid_systems = ['daint:gpu']
        self.valid_prog_environs = ['PrgEnv-gnu']
        self.modules = ['craype-accel-nvidia60', 'craype-hugepages8M']
        self.executable = 'xhpcg_gpu_3.1'
        # The shipped binary may not be executable after staging.
        self.pre_run = ['chmod +x %s' % self.executable]
        self.num_tasks = 0
        self.num_tasks_per_node = 1
        self.num_cpus_per_task = 12
        self.variables = {
            'PMI_NO_FORK': '1',
            'MPICH_USE_DMAPP_COLL': '1',
            'OMP_SCHEDULE': 'static',
            'OMP_NUM_THREADS': str(self.num_cpus_per_task),
            'HUGETLB_VERBOSE': '0',
            'HUGETLB_DEFAULT_PAGE_SIZE': '8M',
        }

        # HPCG writes its results to a YAML file in the stage directory.
        self.output_file = sn.getitem(sn.glob('*.yaml'), 0)

        self.reference = {
            'daint:gpu': {
                'gflops': (94.7, -0.1, None, 'Gflop/s')
            },
            'dom:gpu': {
                'gflops': (94.7, -0.1, None, 'Gflop/s')
            },
        }

        # NOTE(review): true division — num_nodes is deferred and may be a
        # float; sanity below asserts the division is exact. Confirm intended.
        num_nodes = self.num_tasks_assigned / self.num_tasks_per_node
        self.perf_patterns = {
            'gflops':
            sn.extractsingle(
                r'HPCG result is VALID with a GFLOP\/s rating of:\s*'
                r'(?P<perf>\S+)', self.output_file, 'perf', float) / num_nodes
        }

        self.sanity_patterns = sn.all([
            sn.assert_eq(4, sn.count(sn.findall(r'PASSED', self.output_file))),
            sn.assert_eq(0, self.num_tasks_assigned % self.num_tasks_per_node)
        ])
Example #14
0
 def __init__(self):
     '''OpenFOAM-Extend interMixingFoam (dambreak tutorial) check.

     Sanity requires exactly 2944 'Air phase volume fraction' lines in
     the solver output.
     '''
     super().__init__()
     self.descr = ('OpenFOAM-Extend  check of interMixingFoam: '
                   'dambreak tutorial')
     fraction_lines = sn.findall(r'Air phase volume fraction', self.stdout)
     self.sanity_patterns = sn.assert_eq(sn.count(fraction_lines), 2944)
Example #15
0
    def __init__(self):
        '''GPU shared-memory bandwidth microbenchmark.

        Sanity requires two 'Bandwidth' lines per assigned task; perf
        extracts the double-precision bandwidth in GB/s.
        '''
        self.sourcepath = 'shmem.cu'
        self.build_system = 'SingleSource'
        self.valid_systems = ['daint:gpu', 'dom:gpu', 'tiger:gpu']
        self.valid_prog_environs = ['PrgEnv-gnu']
        self.num_tasks = 0
        self.num_tasks_per_node = 1
        self.num_gpus_per_node = 1
        if self.current_system.name in {'daint', 'dom', 'tiger'}:
            self.modules = ['craype-accel-nvidia60']

        # Two bandwidth reports (float and double) are expected per task.
        self.sanity_patterns = sn.assert_eq(
            sn.count(sn.findall(r'Bandwidth', self.stdout)),
            self.num_tasks_assigned * 2)

        self.perf_patterns = {
            'bandwidth':
            sn.extractsingle(r'Bandwidth\(double\) (?P<bw>\S+) GB/s',
                             self.stdout, 'bw', float)
        }
        self.reference = {
            # theoretical limit for P100:
            # 8 [B/cycle] * 1.328 [GHz] * 16 [bankwidth] * 56 [SM] = 9520 GB/s
            'dom:gpu': {
                'bandwidth': (8850, -0.01, 9520 / 8850. - 1, 'GB/s')
            },
            'daint:gpu': {
                'bandwidth': (8850, -0.01, 9520 / 8850. - 1, 'GB/s')
            }
        }

        self.maintainers = ['SK']
        self.tags = {'benchmark', 'diagnostic', 'craype'}
Example #16
0
    def validate_fixture_resolution(self):
        '''Validate that the test's fixtures resolved to the expected
        variants.'''
        fixt_info = type(self).get_variant_info(self.variant_num)['fixtures']
        return sn.all([
            # Assert that f0 and f1 resolve to the right variants
            sn.all([sn.assert_eq(self.f0.p, 0),
                    sn.assert_eq(self.f1.p, 1)]),

            # Assert the outer product of the fixtures variants is correct even
            # with both fixtures being exactly the same.
            sn.assert_true(self.f2.variant_num in fixt_info['f2']),
            sn.assert_true(self.f3.variant_num in fixt_info['f3']),

            # Assert the join behaviour works correctly: the joined fixture
            # handle must expose every variant exactly once.
            sn.assert_eq(sn.len({f.variant_num
                                 for f in self.f4}), ParamFixture.num_variants)
        ])
Example #17
0
    def __init__(self):
        '''Strided-access bandwidth microbenchmark (single task).

        Sanity requires one 'bandwidth' line per assigned task; perf
        extracts the reported bandwidth in GB/s.
        '''
        super().__init__()
        self.sourcepath = 'strides.cpp'
        self.build_system = 'SingleSource'
        self.valid_systems = ['daint:gpu', 'dom:gpu', 'daint:mc', 'dom:mc']
        self.valid_prog_environs = ['PrgEnv-gnu']
        self.num_tasks = 1
        self.num_tasks_per_node = 1

        self.sanity_patterns = sn.assert_eq(
            sn.count(sn.findall(r'bandwidth', self.stdout)),
            self.num_tasks_assigned)

        self.perf_patterns = {
            'bandwidth':
            sn.extractsingle(r'bandwidth: (?P<bw>\S+) GB/s', self.stdout, 'bw',
                             float)
        }

        # Logical CPU counts per partition, presumably consumed by
        # subclasses/hooks when sizing the run — not used directly here.
        self.system_num_cpus = {
            'daint:mc': 72,
            'daint:gpu': 24,
            'dom:mc': 72,
            'dom:gpu': 24,
        }

        self.maintainers = ['SK']
        self.tags = {'benchmark', 'diagnostic'}
Example #18
0
    def __init__(self):
        '''Flexible CUDA memtest diagnostic.

        Downloads and patches cuda_memtest 1.2.3 at build time, disables
        test 6, and requires every remaining test to report completion on
        every task with no errors on stdout/stderr.
        '''
        self.valid_systems = ['daint:gpu', 'dom:gpu', 'tiger:gpu']
        self.valid_prog_environs = ['PrgEnv-cray']
        self.descr = 'Flexible CUDA Memtest'
        self.maintainers = ['TM', 'SK']
        self.num_tasks_per_node = 1
        self.num_tasks = 0
        self.num_gpus_per_node = 1
        self.modules = ['cudatoolkit']
        src_url = ('https://downloads.sourceforge.net/project/cudagpumemtest/'
                   'cuda_memtest-1.2.3.tar.gz')
        self.prebuild_cmd = [
            'wget %s' % src_url, 'tar -xzf cuda_memtest-1.2.3.tar.gz',
            'cd cuda_memtest-1.2.3', 'patch -p1 < ../cuda_memtest-1.2.3.patch'
        ]
        self.build_system = 'Make'
        self.executable = './cuda_memtest-1.2.3/cuda_memtest'
        self.executable_opts = ['--disable_test', '6', '--num_passes', '1']

        # Tests 6 (disabled above) and 9 are excluded from the finish check.
        valid_test_ids = {i for i in range(11) if i not in {6, 9}}
        assert_finished_tests = [
            sn.assert_eq(
                sn.count(sn.findall('Test%s finished' % test_id, self.stdout)),
                self.num_tasks_assigned) for test_id in valid_test_ids
        ]
        self.sanity_patterns = sn.all([
            *assert_finished_tests,
            sn.assert_not_found('(?i)ERROR', self.stdout),
            sn.assert_not_found('(?i)ERROR', self.stderr)
        ])
        self.tags = {'diagnostic', 'ops', 'craype'}
Example #19
0
    def validate(self):
        '''Check the tested node count and register per-node perf patterns.

        Eagerly asserts that the number of nodes reporting DGEMM timings
        matches the requested task count, then installs a per-hostname
        performance pattern (and reference, when the processor arch is
        known).  Returns True.
        '''
        # FIXME: This is currently complicated due to GH #2334

        all_tested_nodes = sn.evaluate(
            sn.extractall(
                r'(?P<hostname>\S+):\s+Time for \d+ DGEMM operations',
                self.stdout, 'hostname'))
        num_tested_nodes = len(all_tested_nodes)
        failure_msg = ('Requested %s node(s), but found %s node(s)' %
                       (self.job.num_tasks, num_tested_nodes))
        sn.evaluate(
            sn.assert_eq(num_tested_nodes, self.job.num_tasks,
                         msg=failure_msg))

        pname = self.current_partition.fullname
        arch = self.current_partition.processor.arch
        for hostname in all_tested_nodes:
            # References are keyed by '<arch>@<cpus>c'; skip unknown archs.
            key = f'{arch}@{self.num_cpus_per_task}c'
            if key in self.arch_refs:
                self.reference[f'{pname}:{hostname}'] = self.arch_refs[key]

            self.perf_patterns[hostname] = sn.extractsingle(
                fr'{hostname}:\s+Avg\. performance\s+:\s+(?P<gflops>\S+)'
                fr'\sGflop/s', self.stdout, 'gflops', float)

        return True
Example #20
0
def extrae_version(obj):
    '''Compare the Extrae version against the per-system reference.

    Extrae has no ``--version`` flag, so the major/minor/micro numbers are
    scraped from ``extrae_version.h`` and concatenated (e.g. '371'):

    .. code-block::

      > cat $EBROOTEXTRAE/include/extrae_version.h
      #define EXTRAE_MAJOR 3
      #define EXTRAE_MINOR 7
      #define EXTRAE_MICRO 1
      returns: True or False
    '''
    expected_versions = {
        'daint': '371',
        'dom': '381',
    }
    header = obj.version_file
    regex = (r'#define EXTRAE_MAJOR (?P<v1>\d)\n'
             r'#define EXTRAE_MINOR (?P<v2>\d)\n'
             r'#define EXTRAE_MICRO (?P<v3>\d)')
    major = sn.extractsingle(regex, header, 'v1')
    minor = sn.extractsingle(regex, header, 'v2')
    micro = sn.extractsingle(regex, header, 'v3')
    # The extracted groups are strings, so '+' concatenates: '3'+'7'+'1'.
    found_version = major + minor + micro
    return sn.assert_eq(
        found_version, expected_versions[obj.current_system.name])
Example #21
0
 def setup(self, partition, environ, **job_opts):
     '''Set compiler flags and the sanity check for the Intel Inspector run.

     Sanity verifies the job succeeded, the tool build number matches the
     system default, and the expected memory-leak findings appear in the
     generated reports.
     '''
     super().setup(partition, environ, **job_opts)
     environ_name = self.current_environ.name
     prgenv_flags = self.prgenv_flags[environ_name]
     self.build_system.cflags = prgenv_flags
     self.build_system.cxxflags = prgenv_flags
     self.build_system.fflags = prgenv_flags
     # Matches e.g. 'Intel(R) Inspector 2019 Update 4 (build 597413'
     regexversion = (r'^Intel\(R\)\sInspector\s\d+\sUpdate\s\d+\s\(build'
                     r'\s(?P<toolsversion>\d+)')
     system_default_toolversion = {
         'daint': '597413',  # 2019 Update 4
         'dom': '597413',  # 2019 Update 4
     }
     toolsversion = system_default_toolversion[self.current_system.name]
     self.sanity_patterns = sn.all([
         # check the job:
         sn.assert_found('SUCCESS', self.stdout),
         # check the tool's version:
         sn.assert_eq(
             sn.extractsingle(regexversion, self.version_rpt,
                              'toolsversion'), toolsversion),
         # check the reports:
         sn.assert_found(r'1 Memory leak problem\(s\) detected',
                         self.summary_rpt),
         sn.assert_found(r'1 Memory not deallocated problem\(s\) detected',
                         self.summary_rpt),
         sn.assert_found(
             r'_main.\w+\(\d+\): Warning X\d+: P\d: '
             r'Memory not deallocated:', self.observations_rpt),
         sn.assert_found(r'_main.\w+\(\d+\): Warning X\d+:',
                         self.problems_rpt),
     ])
Example #22
0
    def __init__(self, kernel_version):
        '''GPU shared-memory bandwidth microbenchmark.

        Sanity requires two 'Bandwidth' lines per assigned task; perf
        extracts the double-precision bandwidth in GB/s.
        '''
        # NOTE(review): kernel_version is not used in this constructor —
        # presumably consumed by a subclass or parameterization; confirm.
        super().__init__()
        self.sourcepath = 'shmem.cu'
        self.build_system = 'SingleSource'
        self.valid_systems = ['daint:gpu', 'dom:gpu']
        self.valid_prog_environs = ['PrgEnv-gnu']
        self.num_tasks = 0
        self.num_tasks_per_node = 1

        self.sanity_patterns = sn.assert_eq(
            sn.count(sn.findall(r'Bandwidth', self.stdout)),
            self.num_tasks_assigned * 2)

        self.perf_patterns = {
            'bandwidth': sn.extractsingle(
                r'Bandwidth\(double\) (?P<bw>\S+) GB/s',
                self.stdout, 'bw', float)
        }
        # theoretical limit:
        # 8 [B/cycle] * 1.328 [GHz] * 16 [bankwidth] * 56 [SM] = 9520 GB/s
        self.reference = {
            'dom:gpu': {
                'bandwidth': (8850, -0.01, 1. - 9520/8850, 'GB/s')
            },
            'daint:gpu': {
                'bandwidth': (8850, -0.01, 1. - 9520/8850, 'GB/s')
            },
        }

        self.maintainers = ['SK']
        self.tags = {'benchmark', 'diagnostic'}
Example #23
0
    def set_sanity(self):
        '''Check that the reported OpenMP version matches the compiler.

        The expected _OPENMP date differs per programming environment and
        language; for PrgEnv-cray it additionally depends on the CCE major
        version extracted from the run report.
        '''
        envname = self.current_environ.name
        # CCE specific: the supported OpenMP level changed with CCE 10.
        cce_version = None
        if self.lang == 'C++' and envname == 'PrgEnv-cray':
            rptf = os.path.join(self.stagedir, sn.evaluate(self.stdout))
            cce_version = sn.extractsingle(r'CRAY_CC_VERSION=(\d+)\.\S+', rptf,
                                           1, int)

        # OpenMP support varies between compilers:
        self.openmp_versions = {
            'PrgEnv-gnu': {'C++': 201511, 'F90': 201511},
            'PrgEnv-pgi': {'C++': 201307, 'F90': 201307},
            'PrgEnv-intel': {'C++': 201611, 'F90': 201611},
            'PrgEnv-aocc': {'C++': 201107, 'F90': 201307},
            'PrgEnv-cray': {
                'C++': 201511 if cce_version == 10 else 201811,
                'F90': 201511,
            },
        }
        found_version = sn.extractsingle(r'OpenMP-\s*(\d+)', self.stdout, 1,
                                         int)
        self.sanity_patterns = sn.all(
            [
                sn.assert_found('SUCCESS', self.stdout),
                sn.assert_eq(found_version,
                             self.openmp_versions[envname][self.lang]),
            ]
        )
Example #24
0
    def count_successful_burns(self):
        '''Set the sanity patterns to count the number of successful burns.

        Each task must report a ``GPU <n>(OK)`` line on stdout.
        '''
        ok_reports = sn.findall(r'^\s*\[[^\]]*\]\s*GPU\s*\d+\(OK\)',
                                self.stdout)
        return sn.assert_eq(sn.count(ok_reports), self.num_tasks_assigned)
Example #25
0
 def set_sanity(self):
     '''Sanity: the job must exit with status 0.

     Performance is the wall-clock time reported (e.g. by `time`) as a
     'real <seconds>' line on stderr.
     '''
     self.sanity_patterns = sn.assert_eq(self.job.exitcode, 0)
     self.perf_patterns = {
         'real_time':
         sn.extractsingle(r'real (?P<real_time>\S+)', self.stderr,
                          'real_time', float)
     }
Example #26
0
    def __init__(self):
        '''MPI Hello World diagnostic: one task per node, sanity requires
        the receiver to report correct messages from all other ranks.'''
        self.valid_systems = [
            'daint:gpu', 'daint:mc', 'dom:gpu', 'dom:mc', 'kesch:cn',
            'kesch:pn', 'tiger:gpu', 'arolla:cn', 'arolla:pn', 'tsa:cn',
            'tsa:pn'
        ]
        self.valid_prog_environs = ['PrgEnv-cray']
        if self.current_system.name == 'kesch':
            self.exclusive_access = True
            self.valid_prog_environs = ['PrgEnv-cray', 'PrgEnv-gnu']
        elif self.current_system.name in ['arolla', 'tsa']:
            self.exclusive_access = True
            self.valid_prog_environs = ['PrgEnv-gnu']

        self.descr = 'MPI Hello World'
        self.sourcesdir = 'src/mpi'
        self.sourcepath = 'mpi_helloworld.c'
        self.maintainers = ['RS', 'AJ']
        self.num_tasks_per_node = 1
        self.num_tasks = 0
        num_processes = sn.extractsingle(
            r'Received correct messages from (?P<nprocs>\d+) processes',
            self.stdout, 'nprocs', int)
        # One rank fewer than the task count is expected — presumably the
        # receiving rank does not message itself; verify against the source.
        self.sanity_patterns = sn.assert_eq(num_processes,
                                            self.num_tasks_assigned - 1)
        self.tags = {'diagnostic', 'ops', 'craype'}
Example #27
0
    def __init__(self):
        '''CP2K H2O-256 benchmark check.

        Sanity requires a clean program stop, exactly 10 'STEP NUM'
        markers, and a total energy within 1e-4 a.u. of the reference;
        perf extracts the CP2K timing line.
        '''
        self.valid_prog_environs = ['builtin']
        self.executable = 'cp2k.psmp'
        self.executable_opts = ['H2O-256.inp']

        # Last reported total energy (item=-1 picks the final match).
        energy = sn.extractsingle(
            r'\s+ENERGY\| Total FORCE_EVAL \( QS \) '
            r'energy \(a\.u\.\):\s+(?P<energy>\S+)',
            self.stdout,
            'energy',
            float,
            item=-1)
        energy_reference = -4404.2323
        energy_diff = sn.abs(energy - energy_reference)
        self.sanity_patterns = sn.all([
            sn.assert_found(r'PROGRAM STOPPED IN', self.stdout),
            sn.assert_eq(
                sn.count(
                    sn.extractall(r'(?P<step_count>STEP NUM)', self.stdout,
                                  'step_count')), 10),
            sn.assert_lt(energy_diff, 1e-4)
        ])

        self.perf_patterns = {
            'time':
            sn.extractsingle(r'^ CP2K(\s+[\d\.]+){4}\s+(?P<perf>\S+)',
                             self.stdout, 'perf', float)
        }

        self.maintainers = ['LM']
        self.tags = {'scs'}
        self.strict_check = False
        self.modules = ['CP2K']
        self.extra_resources = {'switches': {'num_switches': 1}}
Example #28
0
    def __init__(self):
        '''Strided-access bandwidth microbenchmark (single task).

        Sanity requires one 'bandwidth' line per assigned task; perf
        extracts the reported bandwidth in GB/s.
        '''
        self.sourcepath = 'strides.cpp'
        self.build_system = 'SingleSource'
        self.valid_systems = [
            'cannon:local', 'cannon:local-gpu', 'cannon:gpu_test',
            'cannon:test', 'fasse:fasse', 'test:rc-testing'
        ]
        self.valid_prog_environs = ['builtin', 'gnu', 'gpu', 'intel']
        self.build_system.cxxflags = ['-std=c++11', '-lpthread']
        self.num_tasks = 1
        self.num_tasks_per_node = 1

        self.sanity_patterns = sn.assert_eq(
            sn.count(sn.findall(r'bandwidth', self.stdout)),
            self.num_tasks_assigned)

        self.perf_patterns = {
            'bandwidth':
            sn.extractsingle(r'bandwidth: (?P<bw>\S+) GB/s', self.stdout, 'bw',
                             float)
        }

        # Logical CPU counts per partition ('*' is the fallback) —
        # presumably consumed by subclasses/hooks; not used directly here.
        self.system_num_cpus = {
            'cannon:local': 48,
            'cannon:local-gpu': 32,
            'cannon:gpu_test': 16,
            'cannon:test': 48,
            'fasse:fasse': 48,
            'test:rc-testing': 36,
            '*': 32,
        }
Example #29
0
    def __init__(self):
        '''Flexible CUDA memtest diagnostic (sm20 binary).

        Downloads cuda_memtest 1.2.3 at build time, disables test 6, and
        requires every remaining test to report completion on every task
        with no errors on stdout/stderr.
        '''
        super().__init__()
        self.valid_systems = ['daint:gpu', 'dom:gpu']
        self.valid_prog_environs = ['PrgEnv-cray']
        self.descr = 'Flexible Cuda Memtest'
        self.maintainers = ['TM', 'VK']
        self.num_tasks_per_node = 1
        self.num_tasks = 0
        self.num_gpus_per_node = 1
        self.modules = ['cudatoolkit']
        self.sourcesdir = None
        src_url = ('https://downloads.sourceforge.net/project/cudagpumemtest/'
                   'cuda_memtest-1.2.3.tar.gz')
        self.prebuild_cmd = [
            'wget %s' % src_url,
            'tar -xzf cuda_memtest-1.2.3.tar.gz --strip-components=1'
        ]
        self.executable = 'cuda_memtest_sm20'
        self.executable_opts = ['--disable_test', '6', '--num_passes', '1']

        # Tests 6 (disabled above) and 9 are excluded from the finish check.
        valid_test_ids = {i for i in range(11) if i not in {6, 9}}
        assert_finished_tests = [
            sn.assert_eq(
                sn.count(sn.findall('Test%s finished' % test_id, self.stdout)),
                self.num_tasks_assigned)
            for test_id in valid_test_ids
        ]
        self.sanity_patterns = sn.all([
            *assert_finished_tests,
            sn.assert_not_found('(?i)ERROR', self.stdout),
            sn.assert_not_found('(?i)ERROR', self.stderr)])
Example #30
0
    def assert_consumed_cpu_set(self):
        '''Check that all the resources have been consumed.

        Tests derived from this class must implement a hook that consumes
        the cpu set as the results from the affinity tool are processed.
        '''
        # An empty set here means every expected cpu was seen and removed.
        remaining_cpus = self.cpu_set
        return sn.assert_eq(remaining_cpus, set())
Example #31
0
 def __init__(self, **kwargs):
     '''OpenACC GEMM example check.

     Sanity requires exactly three 'success' markers in stdout.
     '''
     super().__init__('gemm_example', **kwargs)
     self.sourcepath = 'gemm/'
     self.executable = './gemm/gemm.openacc'
     self.num_cpus_per_task = 12
     self.variables = {'OMP_NUM_THREADS': str(self.num_cpus_per_task)}
     success_count = sn.count(sn.extractall('success', self.stdout))
     self.sanity_patterns = sn.assert_eq(3, success_count)
Example #32
0
    def __init__(self, **kwargs):
        '''OpenACC image-pipeline example check.

        Sanity requires a 'Time (<variant>)' line for each of the five
        filter variants.
        '''
        super().__init__('image_pipeline_example', **kwargs)
        self.sourcepath = 'image-pipeline/'
        self.valid_prog_environs = ['PrgEnv-pgi']

        # We need to reload the PGI compiler here, cos OpenCV loads PrgEnv-gnu
        self.modules = ['craype-accel-nvidia60', 'OpenCV', 'pgi']
        self.executable = './image-pipeline/filter.x'
        self.executable_opts = ['image-pipeline/california-1751455_1280.jpg',
                                'image-pipeline/output.jpg']
        # FIX: the pattern was a plain string containing '\(' and '\S',
        # which are invalid escape sequences in a Python string literal
        # (DeprecationWarning today, a SyntaxError in future versions).
        # A raw string has the identical runtime value.
        self.sanity_patterns = sn.assert_eq(
            {'original', 'blocked', 'update', 'pipelined', 'multi'},
            dset(sn.extractall(r'Time \((\S+)\):.*', self.stdout, 1)))