def __init__(self, mpi_task):
        # {{{ pe
        self.descr = 'Tool validation'
        self.valid_prog_environs = ['PrgEnv-gnu', 'PrgEnv-intel', 'PrgEnv-pgi',
                                    'PrgEnv-cray']
        # self.valid_systems = ['daint:gpu', 'dom:gpu']
        self.valid_systems = ['*']
        self.maintainers = ['JG']
        self.tags = {'sph', 'hpctools', 'cpu'}
        # }}}

        # {{{ compile
        self.testname = 'sqpatch'
        self.tool = 'scalasca'
        self.prebuild_cmds = ['module rm xalt', 'module list -t']
        tool_ver = '2.5'
        tc_ver = '20.08'
        self.tool_modules = {
            'PrgEnv-gnu': [f'Scalasca/{tool_ver}-CrayGNU-{tc_ver}'],
            'PrgEnv-intel': [f'Scalasca/{tool_ver}-CrayIntel-{tc_ver}'],
            'PrgEnv-cray': [f'Scalasca/{tool_ver}-CrayCCE-{tc_ver}'],
            'PrgEnv-pgi': [f'Scalasca/{tool_ver}-CrayPGI-{tc_ver}'],
        }
        self.prgenv_flags = {
            'PrgEnv-gnu': ['-I.', '-I./include', '-std=c++14', '-g', '-O3',
                           '-DUSE_MPI', '-DNDEBUG'],
            'PrgEnv-intel': ['-I.', '-I./include', '-std=c++14', '-g', '-O3',
                             '-DUSE_MPI', '-DNDEBUG'],
            'PrgEnv-cray': ['-I.', '-I./include', '-std=c++17', '-g', '-Ofast',
                            '-DUSE_MPI', '-DNDEBUG'],
            'PrgEnv-pgi': ['-I.', '-I./include', '-std=c++14', '-g', '-O3',
                           '-DUSE_MPI', '-DNDEBUG'],
        }
        self.build_system = 'SingleSource'
        self.build_system.cxx = 'scorep --mpp=mpi --nocompiler CC'
        self.sourcepath = f'{self.testname}.cpp'
        self.executable = f'./{self.testname}.exe'
        # }}}

        # {{{ run
        ompthread = 1
        self.num_tasks = mpi_task
        self.cubeside = cubeside_dict[mpi_task]
        self.steps = steps_dict[mpi_task]
        cycles = cycles_dict[mpi_task]
        self.name = \
            'sphexa_scalascaS+T_{}_{:03d}mpi_{:03d}omp_{}n_{}steps_{}cycles'. \
            format(self.testname, mpi_task, ompthread, self.cubeside,
                   self.steps, cycles)
        self.num_tasks_per_node = 24
        # {{{ ht:
        # self.num_tasks_per_node = mpitask if mpitask < 36 else 36   # noht
        # self.use_multithreading = False  # noht
        # self.num_tasks_per_core = 1      # noht

        # self.num_tasks_per_node = mpitask if mpitask < 72 else 72
        # self.use_multithreading = True # ht
        # self.num_tasks_per_core = 2    # ht
        # }}}
        self.num_cpus_per_task = ompthread
        self.num_tasks_per_core = 2
        self.use_multithreading = True
        self.exclusive = True
        self.time_limit = '10m'
        self.variables = {
            'CRAYPE_LINK_TYPE': 'dynamic',
            'OMP_NUM_THREADS': str(self.num_cpus_per_task),
            'SCOREP_ENABLE_PROFILING': 'false',
            'SCOREP_ENABLE_TRACING': 'true',
            'SCOREP_ENABLE_UNWINDING': 'true',
            'SCOREP_SAMPLING_EVENTS': 'perf_cycles@%s' % cycles,
            # 'SCOREP_SAMPLING_EVENTS': 'PAPI_TOT_CYC@1000000',
            # 'SCOREP_SAMPLING_EVENTS': 'PAPI_TOT_CYC@%s' % cycles,
            # export SCOREP_SAMPLING_EVENTS=PAPI_TOT_CYC@1000000
            # empty SCOREP_SAMPLING_EVENTS will profile mpi calls only:
            # ok: 'SCOREP_SAMPLING_EVENTS': '',
            # 'SCOREP_METRIC_PAPI': 'PAPI_TOT_INS,PAPI_TOT_CYC',
            # 'SCOREP_METRIC_RUSAGE': 'ru_maxrss',
            # 'SCOREP_TIMER': 'clock_gettime',
            # 'SCOREP_PROFILING_MAX_CALLPATH_DEPTH': '1',
            # 'SCOREP_VERBOSE': 'true',
            # To avoid "No free memory page available":
            'SCOREP_TOTAL_MEMORY': '1G',
            # Advanced performance metrics:
            'SCOREP_METRIC_RUSAGE': 'ru_maxrss',
            'SCOREP_METRIC_PAPI': 'PAPI_TOT_INS,PAPI_TOT_CYC',
        }
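        # Note: 'perf_cycles@<n>' triggers one sample every <n> CPU cycles
        # (interrupt-based sampling through the Linux perf interface), and
        # SCOREP_ENABLE_UNWINDING makes Score-P reconstruct the calling
        # context of each sample rather than only recording MPI events.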
        self.version_rpt = 'version.rpt'
        self.which_rpt = 'which.rpt'
        self.info_rpt = 'info.rpt'
        self.rpt = 'rpt'
        # must use scorep.score:
        self.score_rpt = '%s.postproc' % self.rpt
        self.stat_rpt = 'scorep_%s_%s_trace/trace.stat' % \
                        (self.testname, self.num_tasks)
        # self.rpt_inclusive = '%s.inclusive' % self.rpt
        # self.rpt_exclusive = '%s.exclusive' % self.rpt
        # self.cubetool = 'cube_calltree'
        self.executable_opts = [
            f'-n {self.cubeside}', f'-s {self.steps}', '2>&1']
        self.prerun_cmds = [
            'module rm xalt',
            f'{self.tool} -V &> {self.version_rpt}',
            f'scorep --version >> {self.version_rpt}',
            f'which {self.tool} &> {self.which_rpt}',
            f'which scorep >> {self.which_rpt}',
            # f'which {self.cubetool} >> {self.which_rpt}',
            f'scorep-info config-summary &> {self.info_rpt}',
        ]
        cubetree = 'cube_calltree -m time -p -t 1'
        # -m metricname -- print out values for the metric <metricname>
        # -i            -- calculate inclusive values instead of exclusive
        # -t threshold  -- print out only call paths with a value larger
        #                  than <threshold>%
        # -p            -- display percent values
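        # Hypothetical post-processing step (not enabled in this test): the
        # cubetree command above could be applied to a Cube report, e.g.
        #   cube_calltree -m time -p -t 1 scorep-*/profile.cubex > rpt.exclusive
        # to print every call path contributing more than 1% of the time.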
        self.postrun_cmds = [
            # can't test directly from the Vampir GUI, hence dump the trace
            # file content:
            'otf2-print scorep_*_trace/traces.otf2 > %s' % self.rpt
            # 'otf2-print scorep-*/traces.otf2 > %s' % self.rpt
        ]
        # }}}

        # {{{ sanity
        self.sanity_patterns = sn.all([
            # check the job output:
            sn.assert_found(r'Total time for iteration\(0\)', self.stdout),
            # check the tool's version and configuration:
            sn.assert_true(sphsscorep.scorep_version(self)),
            sn.assert_true(sphsscorep.scorep_info_papi_support(self)),
            sn.assert_true(sphsscorep.scorep_info_perf_support(self)),
            sn.assert_true(sphsscorep.scorep_info_unwinding_support(self)),
            # check the report:
            sn.assert_eq(sphsscorep.program_begin_count(self), self.num_tasks),
            sn.assert_eq(sphsscorep.program_end_count(self), self.num_tasks),
            # check the summary report:
            # sn.assert_found(r'^S=C=A=N: \S+ complete\.', self.stderr)
        ])
        # }}}

        # {{{ performance
        # {{{ internal timers
        self.prerun_cmds += ['echo starttime=`date +%s`']
        self.postrun_cmds += ['echo stoptime=`date +%s`']
        # }}}

        # {{{ perf_patterns:
        basic_perf_patterns = sn.evaluate(sphs.basic_perf_patterns(self))
        # tool: scalasca
        tool_perf_patterns = sn.evaluate(sphssca.rpt_trace_stats_d(self))
        self.perf_patterns = {**basic_perf_patterns, **tool_perf_patterns}
        # tool: scorep
        self.perf_patterns.update({
            'max_ipc_rk0': sphsscorep.ipc_rk0(self),
            'max_rumaxrss_rk0': sphsscorep.ru_maxrss_rk0(self),
        })
        # }}}

        # {{{ reference:
        self.reference = sn.evaluate(sphs.basic_reference_scoped_d(self))
        # tool
        myzero_n = (0, None, None, 'count')
        myzero_ipc = (0, None, None, 'ins/cyc')
        myzero_kb = (0, None, None, 'kilobytes')
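        # The tuples above follow ReFrame's (value, lower_thres, upper_thres,
        # unit) reference format; the None thresholds mean these tool metrics
        # are recorded but not bounded.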
        # tool
        self.reference['*:mpi_latesender'] = myzero_n
        self.reference['*:mpi_latesender_wo'] = myzero_n
        self.reference['*:mpi_latereceiver'] = myzero_n
        self.reference['*:mpi_wait_nxn'] = myzero_n
        self.reference['*:max_ipc_rk0'] = myzero_ipc
        self.reference['*:max_rumaxrss_rk0'] = myzero_kb
Example #2
 def set_basic_reference(self):
     self.reference = sn.evaluate(sphs.basic_reference_scoped_d(self))
Example #3
    def __init__(self, mpi_task):
        # {{{ pe
        self.descr = 'Tool validation'
        self.valid_prog_environs = [
            'PrgEnv-gnu', 'PrgEnv-intel', 'PrgEnv-pgi', 'PrgEnv-cray'
        ]
        # self.valid_systems = ['daint:gpu', 'dom:gpu']
        self.valid_systems = ['*']
        self.maintainers = ['JG']
        self.tags = {'sph', 'hpctools', 'cpu'}
        # }}}

        # {{{ compile
        self.testname = 'sqpatch'
        self.tool = 'inspxe-cl'
        self.modules = ['inspector']
        self.prebuild_cmds = ['module rm xalt', 'module list -t']
        self.tool_v = '2020_update2'
        tc_ver = '20.08'
        self.tool_modules = {
            'PrgEnv-gnu':
            [f'CrayGNU/.{tc_ver}', f'{self.modules[0]}/{self.tool_v}'],
            'PrgEnv-intel':
            [f'CrayIntel/.{tc_ver}', f'{self.modules[0]}/{self.tool_v}'],
            'PrgEnv-cray':
            [f'CrayCCE/.{tc_ver}', f'{self.modules[0]}/{self.tool_v}'],
            'PrgEnv-pgi':
            [f'CrayPGI/.{tc_ver}', f'{self.modules[0]}/{self.tool_v}'],
        }
        self.prgenv_flags = {
            'PrgEnv-gnu': [
                '-I.', '-I./include', '-std=c++14', '-g', '-O3', '-DUSE_MPI',
                '-DNDEBUG'
            ],
            'PrgEnv-intel': [
                '-I.', '-I./include', '-std=c++14', '-g', '-O3', '-DUSE_MPI',
                '-DNDEBUG'
            ],
            'PrgEnv-cray': [
                '-I.', '-I./include', '-std=c++17', '-g', '-Ofast',
                '-DUSE_MPI', '-DNDEBUG'
            ],
            'PrgEnv-pgi': [
                '-I.', '-I./include', '-std=c++14', '-g', '-O3', '-DUSE_MPI',
                '-DNDEBUG'
            ],
        }
        self.build_system = 'SingleSource'
        # self.build_system.cxx = 'CC'
        self.sourcepath = f'{self.testname}.cpp'
        self.executable = self.tool
        self.target_executable = f'./{self.testname}.exe'
        self.postbuild_cmds = [f'mv {self.tool} {self.target_executable}']
        # }}}

        # {{{ run
        ompthread = 1
        self.num_tasks = mpi_task
        self.cubeside = cubeside_dict[mpi_task]
        self.steps = steps_dict[mpi_task]
        self.name = 'sphexa_inspector_{}_{:03d}mpi_{:03d}omp_{}n_{}steps'. \
            format(self.testname, mpi_task, ompthread, self.cubeside,
                   self.steps)
        self.num_tasks_per_node = 24
        # {{{ ht:
        # self.num_tasks_per_node = mpitask if mpitask < 36 else 36   # noht
        # self.use_multithreading = False  # noht
        # self.num_tasks_per_core = 1      # noht

        # self.num_tasks_per_node = mpitask if mpitask < 72 else 72
        # self.use_multithreading = True # ht
        # self.num_tasks_per_core = 2    # ht
        # }}}
        self.num_cpus_per_task = ompthread
        self.num_tasks_per_core = 2
        self.use_multithreading = True
        self.exclusive = True
        self.time_limit = '10m'
        self.variables = {
            'CRAYPE_LINK_TYPE': 'dynamic',
            'OMP_NUM_THREADS': str(self.num_cpus_per_task),
        }
        self.dir_rpt = 'rpt'
        self.tool_opts = '-collect mi1 -trace-mpi -no-auto-finalize -r %s' \
            % self.dir_rpt
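        # 'mi1' is Inspector's lightest memory analysis ("Detect Leaks");
        # -no-auto-finalize defers result finalization to the summary report
        # generated in postrun_cmds below.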
        self.executable_opts = [
            self.tool_opts, self.target_executable, f'-n {self.cubeside}',
            f'-s {self.steps}', '2>&1'
        ]
        self.version_rpt = 'version.rpt'
        self.which_rpt = 'which.rpt'
        self.summary_rpt = 'summary.rpt'
        self.prerun_cmds = [
            'module rm xalt',
            f'{self.tool} --version >> {self.version_rpt}',
            f'which {self.tool} &> {self.which_rpt}',
        ]
        self.postrun_cmds = [
            f'{self.tool} -r {self.dir_rpt}.* -report=summary '
            f'&> {self.summary_rpt}',
            # '%s -report=problems &> %s' % (self.tool, self.problems_rpt),
            # '%s -report=observations &> %s' %
            # (self.tool, self.observations_rpt),
        ]
        # }}}

        # {{{ sanity
        self.sanity_patterns = sn.all([
            # check the job output:
            sn.assert_found(r'Total time for iteration\(0\)', self.stdout),
            # check the tool's version:
            sn.assert_true(sphsintel.inspector_version(self)),
            # check the summary report:
            sn.assert_found(r'\d new problem\(s\) found', self.summary_rpt),
        ])
        # }}}

        # {{{ performance
        # {{{ internal timers
        self.prerun_cmds += ['echo starttime=`date +%s`']
        self.postrun_cmds += ['echo stoptime=`date +%s`']
        # }}}

        # {{{ perf_patterns:
        self.perf_patterns = sn.evaluate(sphs.basic_perf_patterns(self))
        # tool
        self.perf_patterns.update({
            'Memory not deallocated':
            sphsintel.inspector_not_deallocated(self),
            # 'Memory leak': sphsintel.inspector_leak(self),
        })
        # }}}

        # {{{ reference:
        self.reference = sn.evaluate(sphs.basic_reference_scoped_d(self))
        # tool
        self.reference['*:Memory not deallocated'] = (0, None, None, '')
Example #4
    def __init__(self, mpitask, steps, cycles, rumetric):
        # {{{ pe
        self.descr = 'Tool validation'
        self.valid_prog_environs = ['PrgEnv-pgi']
        # self.valid_systems = ['daint:gpu', 'dom:gpu']
        self.valid_systems = ['*']
        self.maintainers = ['JG']
        self.tags = {'sph', 'hpctools', 'gpu', 'openacc'}
        # }}}

        # {{{ compile
        self.testname = 'sqpatch'
        self.prebuild_cmds = ['module rm xalt']
        self.prgenv_flags = {
            'PrgEnv-pgi': [
                '-I.', '-I./include', '-std=c++14', '-g', '-O3', '-DNDEBUG',
                '-DUSE_MPI', '-DUSE_ACC', '-DUSE_STD_MATH_IN_KERNELS', '-acc',
                '-ta=tesla:managed,cc60'
            ],  # -mp
        }
        # ---------------------------------------------------------------- tool
        self.modules = ['craype-accel-nvidia60']
        tc_ver = '19.10'
        tool_ver = '6.0'
        postproc_tool_ver = '4ef9d3f'
        postproc_tool_serial = 'otf-profiler'
        self.postproc_tool = 'otf-profiler-mpi'
        self.tool_modules = {
            'PrgEnv-pgi': ['Score-P/%s-CrayPGI-%s' % (tool_ver, tc_ver)]
        }
        # ---------------------------------------------------------------- tool
        self.build_system = 'SingleSource'
        self.build_system.cxx = 'scorep-CC'
        self.sourcepath = '%s.cpp' % self.testname
        self.executable = '%s.exe' % self.testname
        # {{{ openmp:
        # 'PrgEnv-intel': ['-qopenmp'],
        # 'PrgEnv-gnu': ['-fopenmp'],
        # 'PrgEnv-pgi': ['-mp'],
        # 'PrgEnv-cray_classic': ['-homp'],
        # 'PrgEnv-cray': ['-fopenmp'],
        # # '-homp' if lang == 'F90' else '-fopenmp',
        # }}}
        # }}}

        # {{{ run
        ompthread = 1
        # weak scaling: ~10^6 particles per compute node:
        size_dict = {
            1: 100,
            2: 126,
            4: 159,
            8: 200,
            16: 252,
            32: 318,
            64: 400,
            128: 504,
            256: 635
        }
        cubesize = size_dict[mpitask]
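        # Illustrative sketch (an assumption, not used by the test): the table
        # above keeps ~10^6 particles per compute node, i.e. the cube side is
        # the cube root of nodes * 10^6, rounded up:
        import math  # local import, needed only for the sketch below

        def _weak_scaling_cubeside(nodes, particles_per_node=10**6):
            # smallest cube side whose cube holds nodes * particles_per_node
            return math.ceil((particles_per_node * nodes) ** (1 / 3))
        # e.g. _weak_scaling_cubeside(64) -> 400, since 400**3 == 64 * 10**6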
        self.name = \
            'openacc_scorepT_{}_{:03d}mpi_{:03d}omp_{}n_{}steps_{}cycles_{}'. \
            format(self.testname, mpitask, ompthread, cubesize, steps, cycles,
                   rumetric)
        self.num_tasks = mpitask
        self.num_tasks_per_node = 1
        # {{{ ht:
        # self.num_tasks_per_node = mpitask if mpitask < 36 else 36   # noht
        # self.use_multithreading = False  # noht
        # self.num_tasks_per_core = 1      # noht

        # self.num_tasks_per_node = mpitask if mpitask < 72 else 72
        # self.use_multithreading = True # ht
        # self.num_tasks_per_core = 2    # ht
        # }}}
        self.num_cpus_per_task = ompthread
        self.num_tasks_per_core = 1
        self.use_multithreading = False
        self.exclusive = True
        self.time_limit = '10m'
        self.variables = {
            'CRAYPE_LINK_TYPE': 'dynamic',
            'SCOREP_OPENACC_ENABLE': 'yes',
            'OMP_NUM_THREADS': str(self.num_cpus_per_task),
            'SCOREP_WRAPPER_INSTRUMENTER_FLAGS': '"--mpp=mpi --openacc"',
            'SCOREP_ENABLE_PROFILING': 'false',
            'SCOREP_ENABLE_TRACING': 'true',
            'SCOREP_FILTERING_FILE': 'myfilt',
            'SCOREP_VERBOSE': 'true',
            # Needed to avoid "No free memory page available"
            'SCOREP_TOTAL_MEMORY': '1G',
            # Adding some performance metrics:
            # http://scorepci.pages.jsc.fz-juelich.de/scorep-pipelines/docs/
            # => scorep-6.0/html/measurement.html#rusage_counters
            # => https://vampir.eu/public/files/pdf/spcheatsheet_letter.pdf
            #   'SCOREP_METRIC_RUSAGE': 'ru_maxrss',
            #   'SCOREP_METRIC_RUSAGE': 'ru_maxrss,ru_utime',
            #   'SCOREP_METRIC_RUSAGE': 'ru_maxrss',
            #   'SCOREP_METRIC_RUSAGE': '',
            'SCOREP_METRIC_RUSAGE': rumetric,
            'SCOREP_METRIC_PAPI': 'PAPI_TOT_INS,PAPI_TOT_CYC',
        }
        self.rusage_name = sn.evaluate(sphsscacc.otf2cli_metric_name(self))
        if cycles > 0:
            self.variables['SCOREP_SAMPLING_EVENTS'] \
                = 'perf_cycles@%s' % cycles

        self.version_rpt = 'version.rpt'
        self.which_rpt = 'which.rpt'
        self.info_rpt = 'scorep-info.rpt'
        self.rpt = 'rpt'
        self.rpt_jsn = 'result.json'
        self.rpt_inclusive = '%s.inclusive' % self.rpt
        self.rpt_exclusive = '%s.exclusive' % self.rpt
        self.tool = 'scorep'
        self.executable_opts = ['-n %s' % cubesize, '-s %s' % steps]
        self.prerun_cmds = [
            'module rm xalt',
            '%s --version &> %s' % (self.tool, self.version_rpt),
            'which %s &> %s' % (self.tool, self.which_rpt),
            'scorep-info config-summary &> %s' % self.info_rpt,
        ]
        self.postrun_cmds = [
            # otf-profiler is needed for post-processing, but only the GNU
            # version could be built => remove CubeLib to avoid a conflict
            # with CrayPGI:
            'module rm CubeLib',
            'module load otf2_cli_profile/%s-CrayGNU-%s' %
            (postproc_tool_ver, tc_ver),
            # report post-processing tools version
            '%s --version' % postproc_tool_serial,
            # OTF-Profiler version 2.0.0
            'which %s %s' % (postproc_tool_serial, self.postproc_tool),
            # create result.json performance report from tracefile
            # see otf_profiler method (@run_after)
        ]
        # }}}

        # {{{ sanity
        self.sanity_patterns = sn.all([
            # check the job output:
            sn.assert_found(r'Total time for iteration\(0\)', self.stdout),
            # check the tool's version and configuration:
            sn.assert_true(sphsscorep.scorep_version(self)),
            # Needed when using papi counters:
            # sn.assert_true(sphsscorep.scorep_info_papi_support(self)),
            sn.assert_true(sphsscorep.scorep_info_perf_support(self)),
            sn.assert_true(sphsscorep.scorep_info_unwinding_support(self)),
        ])
        # }}}

        # {{{ performance
        # {{{ internal timers
        # use linux date as timer:
        self.prerun_cmds += ['echo starttime=`date +%s`']
        # }}}

        # {{{ perf_patterns:
        basic_perf_patterns = sn.evaluate(sphs.basic_perf_patterns(self))
        tool_perf_patterns = sn.evaluate(sphsscacc.otf2cli_perf_patterns(self))
        self.perf_patterns = {**basic_perf_patterns, **tool_perf_patterns}
        # }}}

        # {{{ reference:
        self.reference = sn.evaluate(sphs.basic_reference_scoped_d(self))
        self.reference = sn.evaluate(sphsscacc.otf2cli_tool_reference(self))
Example #5
    def __init__(self, mpi_task):
        # {{{ pe
        self.descr = 'Tool validation'
        self.valid_prog_environs = ['PrgEnv-gnu']
        # self.valid_systems = ['daint:gpu', 'dom:gpu']
        self.valid_systems = ['*']
        self.maintainers = ['JG']
        self.tags = {'sph', 'hpctools', 'cpu'}
        # }}}

        # {{{ compile
        self.testname = 'sqpatch'
        self.sourcesdir = 'src_cpu'
        self.prgenv_flags = {
            'PrgEnv-gnu': [
                '-I.', '-I./include', '-std=c++14', '-g', '-O3', '-DUSE_MPI',
                '-DNDEBUG', '-fopenmp'
            ]
        }
        tool_ver = '2.8'
        tc_ver = '20.08'
        self.prebuild_cmds = ['module rm xalt', 'module list -t']
        self.tool_modules = {
            'PrgEnv-gnu': [f'CrayGNU/.{tc_ver}', f'gperftools/{tool_ver}'],
        }
        self.build_system = 'SingleSource'
        self.sourcepath = f'{self.testname}.cpp'
        self.executable = './gperftools.sh'
        self.tool_executable = 'gperftools_script.sh'
        self.target_executable = f'./{self.testname}.exe'
        # }}}

        # {{{  run
        ompthread = 1
        self.num_tasks = mpi_task
        self.cubeside = cubeside_dict[mpi_task]
        self.steps = steps_dict[mpi_task]
        self.name = 'sphexa_gperf_{}_{:03d}mpi_{:03d}omp_{}n_{}steps'.format(
            self.testname, mpi_task, ompthread, self.cubeside, self.steps)
        self.num_tasks_per_node = 12
        self.num_cpus_per_task = ompthread
        self.num_tasks_per_core = 1
        self.use_multithreading = False
        self.exclusive = True
        self.time_limit = '10m'
        self.variables = {
            'CRAYPE_LINK_TYPE': 'dynamic',
            'OMP_NUM_THREADS': str(self.num_cpus_per_task),
            'OMP_PROC_BIND': 'true',
        }
        self.executable_opts = [
            self.target_executable, f"-n {self.cubeside}", f"-s {self.steps}"
        ]
        self.prerun_cmds += [
            'module rm xalt', 'module list -t',
            f'mv {self.executable} {self.target_executable}',
            f'mv {self.tool_executable} {self.executable}'
        ]
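        # (Assumption about the wrapper: gperftools_script.sh typically
        #  LD_PRELOADs libprofiler.so and points CPUPROFILE at a per-rank
        #  file before exec-ing the real binary, which is what produces the
        #  *.0 profiles post-processed with pprof below.)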
        self.rpt_file = 'gperftools.rpt'
        self.rpt_file_txt = f'{self.rpt_file}.txt'
        self.rpt_file_pdf = f'{self.rpt_file}.pdf'
        self.rpt_file_doc = f'{self.rpt_file}.doc'
        self.postrun_tool = '$EBROOTPPROF/bin/pprof'
        self.postrun_cmds += [
            # text report (quoting the arguments would make it fail):
            f'{self.postrun_tool} --unit=ms --text --lines '
            f'{self.target_executable} *.0 &> {self.rpt_file_txt}',
            # pdf rpt:
            f'{self.postrun_tool} --pdf {self.target_executable} *.0 '
            f'&> {self.rpt_file_pdf}',
            # pdf rpt type:
            f'file {self.rpt_file_pdf} &> {self.rpt_file_doc}',
            # '$EBROOTPPROF/bin/pprof --unit=ms --text --lines %s %s &> %s' %
            # (self.exe, '*.0', self.rpt_file_txt),
            # '$EBROOTPPROF/bin/pprof --pdf %s %s &> %s' %
            # (self.exe, '*.0', self.rpt_file_pdf),
            # 'file %s &> %s' % (self.rpt_file_pdf, self.rpt_file_doc)
        ]
        # }}}

        # {{{ sanity
        self.sanity_patterns = sn.all([
            # check the job output:
            sn.assert_found(r'Total time for iteration\(0\)', self.stdout),
            sn.assert_found('PDF document', self.rpt_file_doc),
        ])
        # }}}

        # {{{ performance
        # {{{ internal timers
        self.prerun_cmds += ['echo starttime=`date +%s`']
        self.postrun_cmds += ['echo stoptime=`date +%s`']
        # }}}

        # {{{ perf_patterns:
        basic_perf_patterns = sn.evaluate(sphs.basic_perf_patterns(self))
        tool_perf_patterns = sn.evaluate(sphsgperf.gp_perf_patterns(self))
        self.perf_patterns = {**basic_perf_patterns, **tool_perf_patterns}
        # }}}

        # {{{ reference:
        self.reference = sn.evaluate(sphs.basic_reference_scoped_d(self))
        self.reference = sn.evaluate(sphsgperf.gp_tool_reference(self))
Example #6
    def __init__(self, mpi_task):
        # super().__init__()
        # {{{ pe
        self.descr = 'Tool validation'
        self.valid_prog_environs = [
            'PrgEnv-gnu', 'PrgEnv-intel', 'PrgEnv-pgi', 'PrgEnv-cray'
        ]
        # self.valid_systems = ['daint:gpu', 'dom:gpu']
        self.valid_systems = ['*']
        self.maintainers = ['JG']
        self.tags = {'sph', 'hpctools', 'cpu'}
        # }}}

        # {{{ compile
        self.testname = 'sqpatch'
        self.tool = 'valgrind4hpc'
        self.prgenv_flags = {
            'PrgEnv-gnu': [
                '-I.', '-I./include', '-std=c++14', '-g', '-O0', '-DUSE_MPI',
                '-DNDEBUG'
            ],
            'PrgEnv-intel': [
                '-I.', '-I./include', '-std=c++14', '-g', '-O0', '-DUSE_MPI',
                '-DNDEBUG'
            ],
            'PrgEnv-cray': [
                '-I.', '-I./include', '-std=c++17', '-g', '-O0', '-DUSE_MPI',
                '-DNDEBUG'
            ],
            'PrgEnv-pgi': [
                '-I.', '-I./include', '-std=c++14', '-g', '-O0', '-DUSE_MPI',
                '-DNDEBUG'
            ],
        }
        tc_ver = '20.08'
        self.prebuild_cmds = ['module rm xalt', 'module list -t']
        self.tool_modules = {
            'PrgEnv-gnu': [f'CrayGNU/.{tc_ver}', self.tool],
            'PrgEnv-intel': [f'CrayIntel/.{tc_ver}', self.tool],
            'PrgEnv-cray': [f'CrayCCE/.{tc_ver}', self.tool],
            'PrgEnv-pgi': [f'CrayPGI/.{tc_ver}', self.tool],
        }
        self.build_system = 'SingleSource'
        self.sourcepath = f'{self.testname}.cpp'
        self.executable = self.tool
        self.target_executable = f'./{self.testname}.exe'
        self.postbuild_cmds = [
            f'mv {self.executable} {self.target_executable}',
        ]
        # }}}

        # {{{ run
        ompthread = 1
        self.num_tasks = mpi_task
        self.cubeside = cubeside_dict[mpi_task]
        self.steps = steps_dict[mpi_task]
        self.name = 'sphexa_valgrind4hpc_{}_{:03d}mpi_{:03d}omp_{}n_{}steps'. \
            format(self.testname, mpi_task, ompthread, self.cubeside,
                   self.steps)
        self.num_tasks_per_node = 12
        self.num_cpus_per_task = ompthread
        self.num_tasks_per_core = 1
        self.use_multithreading = False
        self.exclusive = True
        self.time_limit = '10m'
        self.variables = {
            'CRAYPE_LINK_TYPE':
            'dynamic',
            'OMP_NUM_THREADS':
            str(self.num_cpus_per_task),
            'PKG_CONFIG_PATH':
            '$VALGRIND4HPC_INSTALL_DIR/lib/pkgconfig:$PKG_CONFIG_PATH',
        }
        self.tool_opts = (
            f' -n{self.num_tasks}'
            # f' --launcher-args=""'
            f' --valgrind-args="--track-origins=yes --leak-check=full"')
        # valgrind4hpc -n32 --launcher-args="-N16 -j2"
        # --valgrind-args="--track-origins=yes --leak-check=full" ./a.out
        # -- arg1 arg2
        self.executable_opts = [
            self.tool_opts, self.target_executable, f'-- -n {self.cubeside}',
            f'-s {self.steps}', '2>&1'
        ]
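        # With the options above, the assembled command line should look
        # roughly like (hypothetical task count):
        #   valgrind4hpc -n24 \
        #     --valgrind-args="--track-origins=yes --leak-check=full" \
        #     ./sqpatch.exe -- -n <cubeside> -s <steps>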
        self.version_rpt = 'version.rpt'
        self.which_rpt = 'which.rpt'
        self.prerun_cmds = [
            'module rm xalt',
            f'echo $VALGRIND4HPC_VERSION > {self.version_rpt}',
            f'grep PACKAGE_VERSION $VALGRIND4HPC_INSTALL_DIR/include/'
            f'valgrind/config.h >> {self.version_rpt}',
            f'which {self.tool} &> {self.which_rpt}',
        ]
        # }}}

        # {{{ sanity
        sanity_1 = r'Conditional jump or move depends on uninitialised value'
        sanity_2 = r'Uninitialised value was created by a heap allocation'
        sanity_3 = r'All heap blocks were freed -- no leaks are possible'
        self.sanity_patterns = sn.all([
            # check the job output:
            sn.assert_found(r'Total time for iteration\(0\)', self.stdout),
            sn.assert_found(sanity_1, self.stdout),
            sn.assert_found(sanity_2, self.stdout),
            sn.assert_found(sanity_3, self.stdout),
        ])
        # }}}

        # {{{ performance
        # {{{ internal timers
        self.time_rpt = 'time.rpt'
        # the tool flushes stdout, hence this timing trick:
        self.prerun_cmds += [f'echo starttime=`date +%s` > {self.time_rpt}']
        self.postrun_cmds += [
            f'echo stoptime=`date +%s` >> {self.time_rpt}',
            f'cat {self.time_rpt}'
        ]
        # }}}

        # {{{ perf_patterns:
        basic_perf_patterns = sn.evaluate(sphs.basic_perf_patterns(self))
        tool_perf_patterns = sn.evaluate(sphsvalgrind.vhpc_perf_patterns(self))
        self.perf_patterns = {**basic_perf_patterns, **tool_perf_patterns}
        # }}}

        # {{{ reference:
        self.reference = sn.evaluate(sphs.basic_reference_scoped_d(self))
        self.reference = sn.evaluate(sphsvalgrind.vhpc_tool_reference(self))
Example #7
    def __init__(self, mpi_task):
        # {{{ pe
        self.descr = 'Tool validation'
        self.valid_prog_environs = ['PrgEnv-gnu']
        # self.valid_systems = ['daint:gpu', 'dom:gpu']
        self.valid_systems = ['*']
        self.maintainers = ['JG']
        self.tags = {'sph', 'hpctools', 'gpu'}
        # }}}

        # {{{ compile
        self.testname = 'sqpatch'
        self.tool = 'scorep'
        self.prebuild_cmds = ['module rm xalt', 'module list -t']
        tool_ver = '6.0'
        tc_ver = '20.08'
        self.tool_modules = {
            'PrgEnv-gnu': [f'Score-P/{tool_ver}-CrayGNU-{tc_ver}-cuda'],
        }
        self.build_system = 'Make'
        self.build_system.makefile = 'Makefile'
        self.build_system.max_concurrency = 2
        self.sourcepath = f'{self.testname}.cpp'
        self.executable = f'./{self.testname}.exe'
        self.target_executable = 'mpi+omp+cuda'
        self.build_system.cxx = 'scorep --mpp=mpi --cuda --nocompiler CC'
        self.build_system.nvcc = 'scorep --cuda --nocompiler nvcc'
        self.build_system.options = [
            self.target_executable,
            f'MPICXX="{self.build_system.cxx}"',
            'SRCDIR=.',
            'BUILDDIR=.',
            'BINDIR=.',
            'CXXFLAGS=-std=c++14',
            'CUDA_PATH=$CUDATOOLKIT_HOME',
            # The makefile adds -DUSE_MPI
        ]
        self.postbuild_cmds = [
            f'mv {self.target_executable}.app '
            f'{self.executable}'
        ]
        # }}}

        # {{{ run
        ompthread = 1
        self.num_tasks = mpi_task
        self.cubeside = cubeside_dict[mpi_task]
        self.steps = steps_dict[mpi_task]
        # cycles = cycles_dict[mpi_task]
        self.name = \
            'sphexa_scorep+cuda_{}_{:03d}mpi_{:03d}omp_{}n_{}steps'. \
            format(self.testname, mpi_task, ompthread, self.cubeside,
                   self.steps)
        self.num_tasks_per_node = 1
        self.num_cpus_per_task = ompthread
        self.num_tasks_per_core = 1
        self.use_multithreading = False
        self.exclusive = True
        self.time_limit = '10m'
        self.variables = {
            'CRAYPE_LINK_TYPE': 'dynamic',
            'OMP_NUM_THREADS': str(self.num_cpus_per_task),
            'SCOREP_ENABLE_PROFILING': 'false',
            'SCOREP_ENABLE_TRACING': 'true',
            'SCOREP_CUDA_ENABLE': 'yes',
            'SCOREP_ENABLE_UNWINDING': 'true',
            # 'SCOREP_SAMPLING_EVENTS': 'perf_cycles@%s' % cycles,
            # 'SCOREP_SAMPLING_EVENTS': 'PAPI_TOT_CYC@1000000',
            # 'SCOREP_SAMPLING_EVENTS': 'PAPI_TOT_CYC@%s' % cycles,
            # export SCOREP_SAMPLING_EVENTS=PAPI_TOT_CYC@1000000
            # empty SCOREP_SAMPLING_EVENTS will profile mpi calls only:
            # ok: 'SCOREP_SAMPLING_EVENTS': '',
            # 'SCOREP_METRIC_PAPI': 'PAPI_TOT_INS,PAPI_TOT_CYC',
            # 'SCOREP_METRIC_RUSAGE': 'ru_maxrss',
            # 'SCOREP_TIMER': 'clock_gettime',
            # 'SCOREP_PROFILING_MAX_CALLPATH_DEPTH': '1',
            # 'SCOREP_VERBOSE': 'true',
            # 'SCOREP_TOTAL_MEMORY': '1G',
        }
        self.version_rpt = 'version.rpt'
        self.which_rpt = 'which.rpt'
        self.info_rpt = 'info.rpt'
        self.executable_opts = [
            f'-n {self.cubeside}', f'-s {self.steps}', '2>&1'
        ]
        self.prerun_cmds = [
            'module rm xalt',
            f'{self.tool} --version &> {self.version_rpt}',
            f'which {self.tool} &> {self.which_rpt}',
            'scorep-info config-summary &> %s' % self.info_rpt,
        ]
        # }}}

        # {{{ sanity
        self.sanity_patterns = sn.all([
            # check the job output:
            sn.assert_found(r'Total time for iteration\(0\)', self.stdout),
            # check the tool's version and configuration:
            sn.assert_true(sphsscorep.scorep_version(self)),
            sn.assert_true(sphsscorep.scorep_info_papi_support(self)),
            sn.assert_true(sphsscorep.scorep_info_perf_support(self)),
            sn.assert_true(sphsscorep.scorep_info_unwinding_support(self)),
            sn.assert_true(sphsscorep.scorep_info_cuda_support(self)),
        ])
        # }}}

        # {{{ performance
        # {{{ internal timers
        self.prerun_cmds += ['echo starttime=`date +%s`']
        self.postrun_cmds += ['echo stoptime=`date +%s`']
        # }}}

        # {{{ perf_patterns:
        basic_perf_patterns = sn.evaluate(sphs.basic_perf_patterns(self))
        self.perf_patterns = {**basic_perf_patterns}
        # }}}

        # {{{ reference:
        self.reference = sn.evaluate(sphs.basic_reference_scoped_d(self))
Example #8
    def __init__(self, mpi_task, cubeside):
        # {{{ pe
        self.descr = 'Tool validation'
        self.valid_prog_environs = ['PrgEnv-gnu']
        # self.valid_systems = ['daint:gpu', 'dom:gpu']
        self.valid_systems = ['*']
        self.maintainers = ['JG']
        self.tags = {'sph', 'hpctools'}
        # }}}

        # {{{ compile
        self.testname = 'sqpatch'
        self.prebuild_cmds = ['module rm xalt', 'module list -t']
        tc_ver = '20.08'
        self.modules = ['craype-accel-nvidia60', 'perftools-base']
        self.tool = 'pat_report'
        self.tool_modules = {
            'PrgEnv-gnu': [f'CrayGNU/.{tc_ver}', 'perftools-lite-gpu'],
        }
        self.build_system = 'Make'
        self.build_system.makefile = 'Makefile'
        self.build_system.nvcc = 'nvcc'
        self.build_system.cxx = 'CC'
        self.build_system.max_concurrency = 2
        self.sourcepath = f'{self.testname}.cpp'
        self.executable = f'./{self.testname}.exe'
        self.target_executable = 'mpi+omp+cuda'
        self.build_system.options = [
            self.target_executable,
            f'MPICXX="{self.build_system.cxx}"',
            'SRCDIR=.',
            'BUILDDIR=.',
            'BINDIR=.',
            'CXXFLAGS=-std=c++14',
            'CUDA_PATH=$CUDATOOLKIT_HOME',
            # The makefile adds -DUSE_MPI
        ]
        self.version_rpt = 'version.rpt'
        self.which_rpt = 'which.rpt'
        self.rpt = 'RUNTIME.rpt'
        self.postbuild_cmds = [
            f'mv {self.target_executable}.app '
            f'{self.executable}',
            f'{self.tool} -V &> {self.version_rpt}',
            f'which {self.tool} &> {self.which_rpt}',
        ]
        # }}}

        # {{{ run
        ompthread = 1
        self.num_tasks = mpi_task
        self.cubeside = cubeside
        self.steps = steps_dict[mpi_task]
        self.name = \
            'sphexa_perftools-gpu-cuda_{}_{:03d}mpi_{:03d}omp_{}n_{}steps'. \
            format(self.testname, mpi_task, ompthread, self.cubeside,
                   self.steps)
        self.num_tasks_per_node = 1
        self.num_cpus_per_task = ompthread
        self.num_tasks_per_core = 1
        self.use_multithreading = False
        self.exclusive = True
        self.time_limit = '10m'
        self.variables = {
            'CRAYPE_LINK_TYPE': 'dynamic',
            'OMP_NUM_THREADS': str(self.num_cpus_per_task),
        }
        self.executable_opts = [
            f'-n {self.cubeside}', f'-s {self.steps}', '2>&1'
        ]
        self.prerun_cmds = ['module rm xalt']
        self.postrun_cmds = [
            f'cp {self.executable}+*/rpt-files/RUNTIME.rpt {self.rpt}'
        ]
        # }}}

        # {{{ sanity
        self.sanity_patterns = sn.all([
            # check the job output:
            sn.assert_found(r'Total time for iteration\(0\)', self.stdout),
            sn.assert_true(sphsptlgpu.tool_version(self)),
        ])
        # }}}

        # {{{ performance
        # {{{ internal timers
        self.prerun_cmds += ['echo starttime=`date +%s`']
        self.postrun_cmds += ['echo stoptime=`date +%s`']
        # }}}

        # {{{ perf_patterns:
        basic_perf_patterns = sn.evaluate(sphs.basic_perf_patterns(self))
        tool_perf_patterns = sn.evaluate(sphsptlgpu.tool_perf_patterns(self))
        self.perf_patterns = {**basic_perf_patterns, **tool_perf_patterns}
        # }}}

        # {{{ reference:
        self.reference = sn.evaluate(sphs.basic_reference_scoped_d(self))
        # tool's reference
        myzero_p = (0, None, None, '%')
        myzero_mb = (0, None, None, 'MiBytes')
        self.reference['*:host_time%'] = myzero_p
        self.reference['*:device_time%'] = myzero_p
        self.reference['*:acc_copyin'] = myzero_mb
        self.reference['*:acc_copyout'] = myzero_mb
Example #9
    def __init__(self, mpi_task):
        # {{{ pe
        self.descr = 'Tool validation'
        self.valid_prog_environs = ['PrgEnv-gnu']
        # self.valid_systems = ['daint:gpu', 'dom:gpu']
        self.valid_systems = ['*']
        self.maintainers = ['JG']
        self.tags = {'sph', 'hpctools', 'gpu'}
        # }}}

        # {{{ compile
        self.testname = 'sqpatch'
        self.tool = 'nvprof'
        self.tool_mf = 'nvhpc'
        tc_ver = '20.08'
        self.prebuild_cmds = ['module rm xalt', 'module list -t']
        self.tool_modules = {
            'PrgEnv-gnu': [f'CrayGNU/.{tc_ver}', 'craype-accel-nvidia60',
                           self.tool_mf],
        }
        self.build_system = 'Make'
        self.build_system.makefile = 'Makefile'
        self.build_system.nvcc = 'nvcc'
        self.build_system.cxx = 'CC'
        self.build_system.max_concurrency = 2
        self.executable = self.tool
        self.target_executable = 'mpi+omp+cuda'
        self.build_system.options = [
            self.target_executable, 'MPICXX=CC', 'SRCDIR=.', 'BUILDDIR=.',
            'BINDIR=.', 'CUDA_PATH=$CUDATOOLKIT_HOME',
            # The makefile adds -DUSE_MPI
            # 'CXXFLAGS=',
        ]
        self.postbuild_cmds = [f'mv {self.target_executable}.app '
                               f'{self.target_executable}']
        # }}}

        # {{{ run
        ompthread = 1
        self.num_tasks = mpi_task
        self.cubeside = cubeside_dict[mpi_task]
        self.steps = steps_dict[mpi_task]
        self.name = 'sphexa_nvprofcuda_{}_{:03d}mpi_{:03d}omp_{}n_{}steps'. \
            format(self.testname, mpi_task, ompthread, self.cubeside,
                   self.steps)
        self.num_tasks_per_node = 1
        self.num_cpus_per_task = ompthread
        self.num_tasks_per_core = 1
        self.use_multithreading = False
        self.exclusive = True
        self.time_limit = '15m'
        self.variables = {
            'CRAYPE_LINK_TYPE': 'dynamic',
            'OMP_NUM_THREADS': str(self.num_cpus_per_task),
            # 'COMPUTE_PROFILE': '',
            # 'PMI_NO_FORK': '1',
        }
        self.tool_opts = ''
        # self.tool_opts = r'-o nvprof.output.%h.%p'
        self.executable_opts = [
            self.tool_opts, f'./{self.target_executable}',
            f'-n {self.cubeside}', f'-s {self.steps}', '2>&1']
        self.version_rpt = 'version.rpt'
        self.which_rpt = 'which.rpt'
        self.summary_rpt = 'summary.rpt'
        # Reminder: NVreg_RestrictProfilingToAdminUsers=0 (RFC-16) needed since
        # cuda/10.1
        self.postrun_cmds = ['cat /etc/modprobe.d/nvidia.conf']
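        # (Assumption: on a node configured for user profiling, the file
        #  printed above contains a line such as
        #      options nvidia NVreg_RestrictProfilingToAdminUsers=0
        #  which exposes the GPU performance counters to non-admin users.)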
        self.prerun_cmds = [
            'module rm xalt',
            f'{self.tool} --version &> {self.version_rpt}',
            f'which {self.tool} &> {self.which_rpt}',
        ]
        # }}}

        # {{{ sanity
        self.sanity_patterns = sn.all([
            # check the job output:
            sn.assert_found(r'Total time for iteration\(0\)', self.stdout),
            # check the tool's version:
            sn.assert_true(sphsnv.nvprof_version(self)),
            # check the summary report:
            sn.assert_found('NVPROF is profiling process', self.stdout),
        ])
        # }}}

        # {{{ performance
        # {{{ internal timers
        self.prerun_cmds += ['echo starttime=`date +%s`']
        self.postrun_cmds += ['echo stoptime=`date +%s`']
        # }}}

        # {{{ perf_patterns:
        basic_perf_patterns = sn.evaluate(sphs.basic_perf_patterns(self))
        tool_perf_patterns = sn.evaluate(sphsnv.nvprof_perf_patterns(self))
        self.perf_patterns = {**basic_perf_patterns, **tool_perf_patterns}
        # }}}

        # {{{ reference:
        self.reference = sn.evaluate(sphs.basic_reference_scoped_d(self))
        # tool's reference
        myzero_k = (0, None, None, 'KiB')
        myzero_p = (0, None, None, '%')
        self.reference['*:%cudaMemcpy'] = myzero_p
        self.reference['*:%CUDA_memcpy_HtoD_time'] = myzero_p
        self.reference['*:%CUDA_memcpy_DtoH_time'] = myzero_p
        self.reference['*:CUDA_memcpy_HtoD_KiB'] = myzero_k
        self.reference['*:CUDA_memcpy_DtoH_KiB'] = myzero_k
        self.reference['*:%computeMomentumAndEnergyIAD'] = myzero_p
        self.reference['*:%computeIAD'] = myzero_p
Example #10
File: extrae.py  Project: kraushm/hpctools
    def __init__(self, mpi_task):
        # {{{ pe
        self.descr = 'Tool validation'
        self.valid_prog_environs = ['PrgEnv-gnu']
        # self.valid_systems = ['daint:gpu', 'dom:gpu']
        self.valid_systems = ['*']
        self.maintainers = ['JG']
        self.tags = {'sph', 'hpctools', 'cpu'}
        # }}}

        # {{{ compile
        self.testname = 'sqpatch'
        self.prebuild_cmds = ['module rm xalt']
        self.prgenv_flags = {
            'PrgEnv-gnu': [
                '-I.', '-I./include', '-std=c++14', '-g', '-O3', '-DUSE_MPI',
                '-DNDEBUG'
            ],
        }
        # ---------------------------------------------------------------- tool
        self.tool = 'tool.sh'
        tool_ver = '3.8.1'
        tc_ver = '20.08'
        self.tool_modules = {
            'PrgEnv-gnu': [f'Extrae/{tool_ver}-CrayGNU-{tc_ver}'],
        }
        # ---------------------------------------------------------------- tool
        self.build_system = 'SingleSource'
        self.build_system.cxx = 'CC'
        self.sourcepath = '%s.cpp' % self.testname
        self.executable = self.tool
        self.target_executable = './%s.exe' % self.testname
        # {{{ openmp:
        # 'PrgEnv-intel': ['-qopenmp'],
        # 'PrgEnv-gnu': ['-fopenmp'],
        # 'PrgEnv-pgi': ['-mp'],
        # 'PrgEnv-cray_classic': ['-homp'],
        # 'PrgEnv-cray': ['-fopenmp'],
        # # '-homp' if lang == 'F90' else '-fopenmp',
        # }}}
        # }}}

        # {{{ run
        ompthread = 1
        self.cubeside = cubeside_dict[mpi_task]
        self.steps = steps_dict[mpi_task]
        self.name = \
            'sphexa_extrae_{}_{:03d}mpi_{:03d}omp_{}n_{}steps'. \
            format(self.testname, mpi_task, ompthread, self.cubeside,
                   self.steps)
        self.num_tasks = mpi_task
        self.num_tasks_per_node = 24  # 72
        # {{{ ht:
        # self.num_tasks_per_node = mpitask if mpitask < 36 else 36   # noht
        # self.use_multithreading = False  # noht
        # self.num_tasks_per_core = 1      # noht

        # self.num_tasks_per_node = mpitask if mpitask < 72 else 72
        # self.use_multithreading = True # ht
        # self.num_tasks_per_core = 2    # ht
        # }}}
        self.num_cpus_per_task = ompthread
        self.num_tasks_per_core = 2
        self.use_multithreading = True
        self.exclusive = True
        self.time_limit = '10m'
        self.variables = {
            'CRAYPE_LINK_TYPE': 'dynamic',
            'OMP_NUM_THREADS': str(self.num_cpus_per_task),
        }
        self.version_rpt = 'version.rpt'
        self.which_rpt = 'which.rpt'
        self.rpt = 'rpt'
        self.tool = './tool.sh'
        self.executable = self.tool
        self.executable_opts = [
            f'-- -n {self.cubeside}', f'-s {self.steps}', '2>&1'
        ]
        self.xml1 = '$EBROOTEXTRAE/share/example/MPI/extrae.xml'
        self.xml2 = 'extrae.xml'
        self.patch = 'extrae.xml.patch'
        self.version_file = 'extrae_version.h'
        self.prerun_cmds = [
            'module rm xalt',
            # tool version
            'cp $EBROOTEXTRAE/include/extrae_version.h %s' % self.version_file,
            # will launch ./tool.sh myexe myexe_args:
            'mv %s %s' % (self.executable, self.target_executable),
            # .xml
            'echo %s &> %s' % (self.xml1, self.which_rpt),
            'patch -i %s %s -o %s' % (self.patch, self.xml1, self.xml2),
            # .sh
            'echo -e \'%s\' >> %s' % (sphsextrae.create_sh(self), self.tool),
            'chmod u+x %s' % (self.tool),
        ]
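        # (Assumption about what sphsextrae.create_sh() emits: an Extrae
        #  wrapper script typically looks like
        #      #!/bin/bash
        #      export EXTRAE_CONFIG_FILE=extrae.xml
        #      export LD_PRELOAD=$EBROOTEXTRAE/lib/libmpitrace.so
        #      exec "$@"
        #  so that every rank is traced according to the patched extrae.xml.)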
        self.prv = '%s.prv' % self.target_executable[2:]  # stripping './'
        self.postrun_cmds = [
            'stats-wrapper.sh %s -comms_histo' % self.prv,
        ]
        self.rpt_mpistats = '%s.comms.dat' % self.target_executable
        # }}}

        # {{{ sanity
        self.sanity_patterns = sn.all([
            # check the job output:
            sn.assert_found(r'Total time for iteration\(0\)', self.stdout),
            # check the tool version:
            sn.assert_true(sphsextrae.extrae_version(self)),
            # check the summary report:
            sn.assert_found(
                r'Congratulations! %s has been generated.' % self.prv,
                self.stdout),
        ])
        # }}}

        # {{{  performance
        # {{{ internal timers
        # use linux date as timer:
        self.prerun_cmds += ['echo starttime=`date +%s`']
        self.postrun_cmds += ['echo stoptime=`date +%s`']
        # }}}

        # {{{ perf_patterns:
        basic_perf_patterns = sn.evaluate(sphs.basic_perf_patterns(self))
        tool_perf_patterns = sn.evaluate(sphsextrae.rpt_mpistats(self))
        self.perf_patterns = {**basic_perf_patterns, **tool_perf_patterns}
        # }}}

        # {{{ reference:
        basic_reference = sn.evaluate(sphs.basic_reference_scoped_d(self))
        self.reference = basic_reference
        # tool's reference
        myzero = (0, None, None, '')
        myzero_p = (0, None, None, '%')
        self.reference['*:num_comms_0-10B'] = myzero
        self.reference['*:num_comms_10B-100B'] = myzero
        self.reference['*:num_comms_100B-1KB'] = myzero
        self.reference['*:num_comms_1KB-10KB'] = myzero
        self.reference['*:num_comms_10KB-100KB'] = myzero
        self.reference['*:num_comms_100KB-1MB'] = myzero
        self.reference['*:num_comms_1MB-10MB'] = myzero
        self.reference['*:num_comms_10MB'] = myzero
        #
        self.reference['*:%_of_bytes_sent_0-10B'] = myzero_p
        self.reference['*:%_of_bytes_sent_10B-100B'] = myzero_p
        self.reference['*:%_of_bytes_sent_100B-1KB'] = myzero_p
        self.reference['*:%_of_bytes_sent_1KB-10KB'] = myzero_p
        self.reference['*:%_of_bytes_sent_10KB-100KB'] = myzero_p
        self.reference['*:%_of_bytes_sent_100KB-1MB'] = myzero_p
        self.reference['*:%_of_bytes_sent_1MB-10MB'] = myzero_p
        self.reference['*:%_of_bytes_sent_10MB'] = myzero_p
Example #11
    def __init__(self, mpi_task, group):
        # {{{ pe
        self.descr = 'Tool validation'
        self.valid_prog_environs = [
            'PrgEnv-gnu', 'PrgEnv-intel', 'PrgEnv-cray', 'PrgEnv-pgi'
        ]
        # self.valid_systems = ['daint:gpu', 'dom:gpu']
        self.valid_systems = ['*']
        self.maintainers = ['JG']
        self.tags = {'sph', 'hpctools', 'cpu'}
        # }}}

        # {{{ compile
        self.testname = 'sqpatch'
        self.modules = ['likwid']
        self.prebuild_cmds = ['module rm xalt', 'module list -t']
        self.tool = 'likwid-perfctr'
        self.tool_v = '1d6636c'
        tc_ver = '20.08'
        self.tool_modules = {
            'PrgEnv-gnu':
            [f'CrayGNU/.{tc_ver}', f'{self.modules[0]}/{self.tool_v}'],
            'PrgEnv-intel':
            [f'CrayIntel/.{tc_ver}', f'{self.modules[0]}/{self.tool_v}'],
            'PrgEnv-cray':
            [f'CrayCCE/.{tc_ver}', f'{self.modules[0]}/{self.tool_v}'],
            'PrgEnv-pgi':
            [f'CrayPGI/.{tc_ver}', f'{self.modules[0]}/{self.tool_v}'],
        }
        self.prgenv_flags = {
            'PrgEnv-gnu': [
                '-I.', '-I./include', '-std=c++14', '-g', '-O3', '-DUSE_MPI',
                '-DNDEBUG'
            ],
            'PrgEnv-intel': [
                '-I.', '-I./include', '-std=c++14', '-g', '-O3', '-DUSE_MPI',
                '-DNDEBUG'
            ],
            'PrgEnv-cray': [
                '-I.', '-I./include', '-std=c++17', '-g', '-Ofast',
                '-DUSE_MPI', '-DNDEBUG'
            ],
            'PrgEnv-pgi': [
                '-I.', '-I./include', '-std=c++14', '-g', '-O3', '-DUSE_MPI',
                '-DNDEBUG'
            ],
        }
        self.build_system = 'SingleSource'
        self.build_system.cxx = 'CC'
        self.sourcepath = '%s.cpp' % self.testname
        self.executable = self.tool
        self.target_executable = './%s.exe' % self.testname
        # {{{ openmp:
        # 'PrgEnv-intel': ['-qopenmp'],
        # 'PrgEnv-gnu': ['-fopenmp'],
        # 'PrgEnv-pgi': ['-mp'],
        # 'PrgEnv-cray_classic': ['-homp'],
        # 'PrgEnv-cray': ['-fopenmp'],
        # # '-homp' if lang == 'F90' else '-fopenmp',
        # }}}
        # }}}

        # {{{ run
        ompthread = 1
        self.num_tasks = mpi_task
        self.cubeside = cubeside_dict[mpi_task]
        self.steps = steps_dict[mpi_task]
        self.group = group
        self.name = 'sphexa_likwid_{}_{:03d}mpi_{:03d}omp_{}n_{}steps_{}'. \
            format(self.testname, mpi_task, ompthread, self.cubeside,
                   self.steps, group)
        self.num_tasks_per_node = 1
        self.num_tasks_per_core = 1
        self.use_multithreading = False
        # {{{ ht:
        # self.num_tasks_per_node = mpitask if mpitask < 36 else 36   # noht
        # self.use_multithreading = False  # noht
        # self.num_tasks_per_core = 1      # noht

        # self.num_tasks_per_node = mpitask if mpitask < 72 else 72
        # self.use_multithreading = True # ht
        # self.num_tasks_per_core = 2    # ht
        # }}}
        self.num_cpus_per_task = ompthread
        self.exclusive = True
        self.time_limit = '10m'
        self.variables = {
            'CRAYPE_LINK_TYPE': 'dynamic',
            'OMP_NUM_THREADS': str(self.num_cpus_per_task),
        }
        # -Cgpu,perf -n1 -t1 likwid-perfctr -C 0 -g MEM (-m) ./streamGCC
        # -r: generate a report upon successful execution
        # TODO: use rpt-files/RUNTIME.rpt
        # self.executable_opts = ['-C 0', '-g MEM', '-g CLOCK', '-g TMA',
        self.executable_opts = [
            '-C 0',
            '-g %s' % group, self.target_executable, f'-n {self.cubeside}',
            f'-s {self.steps}', '2>&1'
        ]
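        # -C 0 pins the measured process to core 0 and -g selects the
        # performance group (e.g. MEM, CLOCK, TMA) passed in as `group`.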
        self.version_rpt = 'version.rpt'
        self.which_rpt = 'which.rpt'
        # TODO: likwid-perfctr -g $g -H
        self.prerun_cmds = [
            'module rm xalt',
            f'mv {self.executable} {self.target_executable}',
            f'{self.tool} --version &> {self.version_rpt}',
            f'which {self.tool} &> {self.which_rpt}',
        ]
        # }}}

        # {{{ sanity
        # sanity_patterns is set externally (in sanity_likwid.py)
        # }}}

        # {{{ performance
        # {{{ internal timers
        self.prerun_cmds += ['echo starttime=`date +%s`']
        self.postrun_cmds += ['echo stoptime=`date +%s`']
        # }}}

        # {{{ perf_patterns:
        self.perf_patterns = sn.evaluate(sphs.basic_perf_patterns(self))
        # tool perf_patterns is set externally (in sanity_likwid.py)
        # }}}

        # {{{ reference:
        self.reference = sn.evaluate(sphs.basic_reference_scoped_d(self))
Example #12
    def __init__(self, mpi_task):
        # {{{ pe
        self.descr = 'Tool validation'
        self.valid_prog_environs = [
            'PrgEnv-gnu', 'PrgEnv-intel', 'PrgEnv-pgi', 'PrgEnv-cray'
        ]
        # self.valid_systems = ['daint:gpu', 'dom:gpu']
        self.valid_systems = ['*']
        self.maintainers = ['JG']
        self.tags = {'sph', 'hpctools', 'cpu'}
        # }}}

        # {{{ compile
        self.testname = 'sqpatch'
        self.tool = 'advixe-cl'
        self.modules = ['advisor']
        self.prebuild_cmds = ['module rm xalt', 'module list -t']
        self.tool_v = '2020_update2'
        tc_ver = '20.08'
        self.tool_modules = {
            'PrgEnv-gnu':
            [f'CrayGNU/.{tc_ver}', f'{self.modules[0]}/{self.tool_v}'],
            'PrgEnv-intel':
            [f'CrayIntel/.{tc_ver}', f'{self.modules[0]}/{self.tool_v}'],
            'PrgEnv-cray':
            [f'CrayCCE/.{tc_ver}', f'{self.modules[0]}/{self.tool_v}'],
            'PrgEnv-pgi':
            [f'CrayPGI/.{tc_ver}', f'{self.modules[0]}/{self.tool_v}'],
        }
        self.prgenv_flags = {
            'PrgEnv-gnu': [
                '-I.', '-I./include', '-std=c++14', '-g', '-O3', '-DUSE_MPI',
                '-DNDEBUG'
            ],
            'PrgEnv-intel': [
                '-I.', '-I./include', '-std=c++14', '-g', '-O3', '-DUSE_MPI',
                '-DNDEBUG'
            ],
            'PrgEnv-cray': [
                '-I.', '-I./include', '-std=c++17', '-g', '-Ofast',
                '-DUSE_MPI', '-DNDEBUG'
            ],
            'PrgEnv-pgi': [
                '-I.', '-I./include', '-std=c++14', '-g', '-O3', '-DUSE_MPI',
                '-DNDEBUG'
            ],
        }
        self.build_system = 'SingleSource'
        # self.build_system.cxx = 'CC'
        self.sourcepath = f'{self.testname}.cpp'
        self.executable = self.tool
        self.target_executable = f'./{self.testname}.exe'
        self.postbuild_cmds = [f'mv {self.tool} {self.target_executable}']
        # }}}

        # {{{ run
        ompthread = 1
        self.num_tasks = mpi_task
        self.cubeside = cubeside_dict[mpi_task]
        self.steps = steps_dict[mpi_task]
        self.name = 'sphexa_advisor_{}_{:03d}mpi_{:03d}omp_{}n_{}steps'.format(
            self.testname, mpi_task, ompthread, self.cubeside, self.steps)
        self.num_tasks_per_node = 24
        # {{{ ht:
        # self.num_tasks_per_node = mpitask if mpitask < 36 else 36   # noht
        # self.use_multithreading = False  # noht
        # self.num_tasks_per_core = 1      # noht

        # self.num_tasks_per_node = mpitask if mpitask < 72 else 72
        # self.use_multithreading = True # ht
        # self.num_tasks_per_core = 2    # ht
        # }}}
        self.num_cpus_per_task = ompthread
        self.num_tasks_per_core = 2
        self.use_multithreading = True
        self.exclusive = True
        self.time_limit = '10m'
        self.variables = {
            'CRAYPE_LINK_TYPE': 'dynamic',
            'OMP_NUM_THREADS': str(self.num_cpus_per_task),
            # to avoid a core dump when reporting (venv/jenkins):
            'LANG': 'C',
            'LC_ALL': 'C',
        }
        self.dir_rpt = 'rpt'
        self.tool_opts = '--collect=survey --search-dir src:rp=. ' \
                         '--data-limit=0 --no-auto-finalize --trace-mpi ' \
                         '--project-dir=%s -- ' % self.dir_rpt
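        # --collect=survey gathers the hotspot/timing data; --no-auto-finalize
        # defers result finalization to the --report=survey step run in
        # postrun_cmds below.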
        self.executable_opts = [
            self.tool_opts, self.target_executable, f'-n {self.cubeside}',
            f'-s {self.steps}', '2>&1'
        ]
        self.version_rpt = 'version.rpt'
        self.which_rpt = 'which.rpt'
        self.summary_rpt = 'summary.rpt'
        self.prerun_cmds = [
            'module rm xalt',
            f'{self.tool} --version >> {self.version_rpt}',
            f'which {self.tool} &> {self.which_rpt}',
        ]
        self.postrun_cmds = [
            f'cd {self.dir_rpt}; ln -s nid?????.000 e000; cd -',
            f'{self.tool} --report=survey --project-dir={self.dir_rpt} '
            f'&> {self.summary_rpt}',
        ]
        # }}}

        # {{{ sanity
        self.sanity_patterns = sn.all([
            # check the job output:
            sn.assert_found(r'Total time for iteration\(0\)', self.stdout),
            # check the tool's version:
            sn.assert_true(sphsintel.advisor_version(self)),
            # check the summary report:
            sn.assert_found(r'advixe: This data has been saved',
                            self.summary_rpt),
        ])
        # }}}

        # {{{ performance
        # {{{ internal timers
        self.prerun_cmds += ['echo starttime=`date +%s`']
        self.postrun_cmds += ['echo stoptime=`date +%s`']
        # }}}

        # {{{ perf_patterns:
        self.perf_patterns = sn.evaluate(sphs.basic_perf_patterns(self))
        # tool
        self.perf_patterns.update({
            'advisor_elapsed':
            sphsintel.advisor_elapsed(self),
            'advisor_loop1_line':
            sphsintel.advisor_loop1_line(self),
        })
        # }}}

        # {{{ reference:
        self.reference = sn.evaluate(sphs.basic_reference_scoped_d(self))
        # tool:
        self.reference['*:advisor_elapsed'] = (0, None, None, 's')
        # TODO: fix loop1_fname to avoid error with --report-file:
        # "Object of type '_DeferredExpression' is not JSON serializable"
        # loop1_fname = sphsintel.advisor_loop1_filename(self)
        loop1_fname = ''
        self.reference['*:advisor_loop1_line'] = (0, None, None, loop1_fname)
Example #13
    def __init__(self, mpi_task):
        # {{{ pe
        self.descr = 'Tool validation'
        self.valid_prog_environs = ['PrgEnv-gnu', 'PrgEnv-intel', 'PrgEnv-pgi',
                                    'PrgEnv-cray']
        # self.valid_systems = ['daint:gpu', 'dom:gpu']
        self.valid_systems = ['*']
        self.maintainers = ['JG']
        self.tags = {'sph', 'hpctools', 'cpu'}
        # }}}

        # {{{ compile
        self.testname = 'sqpatch'
        self.tool = 'scorep'
        self.prebuild_cmds = ['module rm xalt', 'module list -t']
        tool_ver = '6.0'
        tc_ver = '20.08'
        self.tool_modules = {
            'PrgEnv-gnu': [f'Score-P/{tool_ver}-CrayGNU-{tc_ver}'],
            'PrgEnv-intel': [f'Score-P/{tool_ver}-CrayIntel-{tc_ver}'],
            'PrgEnv-cray': [f'Score-P/{tool_ver}-CrayCCE-{tc_ver}'],
            'PrgEnv-pgi': [f'Score-P/{tool_ver}-CrayPGI-{tc_ver}'],
        }
        self.prgenv_flags = {
            'PrgEnv-gnu': ['-I.', '-I./include', '-std=c++14', '-g', '-O3',
                           '-DUSE_MPI', '-DNDEBUG'],
            'PrgEnv-intel': ['-I.', '-I./include', '-std=c++14', '-g', '-O3',
                             '-DUSE_MPI', '-DNDEBUG'],
            'PrgEnv-cray': ['-I.', '-I./include', '-std=c++17', '-g', '-Ofast',
                            '-DUSE_MPI', '-DNDEBUG'],
            'PrgEnv-pgi': ['-I.', '-I./include', '-std=c++14', '-g', '-O3',
                           '-DUSE_MPI', '-DNDEBUG'],
        }
        self.build_system = 'SingleSource'
        self.build_system.cxx = 'scorep --mpp=mpi --nocompiler CC'
        self.sourcepath = f'{self.testname}.cpp'
        self.executable = f'./{self.testname}.exe'
        # }}}

        # {{{ run
        ompthread = 1
        self.num_tasks = mpi_task
        self.cubeside = cubeside_dict[mpi_task]
        self.steps = steps_dict[mpi_task]
        cycles = cycles_dict[mpi_task]
        self.name = \
            'sphexa_scorepS+P_{}_{:03d}mpi_{:03d}omp_{}n_{}steps_{}cycles'. \
            format(self.testname, mpi_task, ompthread, self.cubeside,
                   self.steps, cycles)
        self.num_tasks_per_node = 24
        # {{{ ht:
        # self.num_tasks_per_node = mpi_task if mpi_task < 36 else 36  # noht
        # self.use_multithreading = False  # noht
        # self.num_tasks_per_core = 1      # noht

        # self.num_tasks_per_node = mpi_task if mpi_task < 72 else 72
        # self.use_multithreading = True  # ht
        # self.num_tasks_per_core = 2     # ht
        # }}}
        self.num_cpus_per_task = ompthread
        self.num_tasks_per_core = 2
        self.use_multithreading = True
        self.exclusive = True
        self.time_limit = '10m'
        self.variables = {
            'CRAYPE_LINK_TYPE': 'dynamic',
            'OMP_NUM_THREADS': str(self.num_cpus_per_task),
            'SCOREP_ENABLE_PROFILING': 'true',
            'SCOREP_ENABLE_TRACING': 'false',
            'SCOREP_ENABLE_UNWINDING': 'true',
            'SCOREP_SAMPLING_EVENTS': 'perf_cycles@%s' % cycles,
            # 'SCOREP_SAMPLING_EVENTS': 'PAPI_TOT_CYC@1000000',
            # 'SCOREP_SAMPLING_EVENTS': 'PAPI_TOT_CYC@%s' % cycles,
            # export SCOREP_SAMPLING_EVENTS=PAPI_TOT_CYC@1000000
            # empty SCOREP_SAMPLING_EVENTS will profile mpi calls only:
            # ok: 'SCOREP_SAMPLING_EVENTS': '',
            # 'SCOREP_METRIC_PAPI': 'PAPI_TOT_INS,PAPI_TOT_CYC',
            # 'SCOREP_METRIC_RUSAGE': 'ru_maxrss',
            # 'SCOREP_TIMER': 'clock_gettime',
            # 'SCOREP_PROFILING_MAX_CALLPATH_DEPTH': '1',
            # 'SCOREP_VERBOSE': 'true',
            # 'SCOREP_TOTAL_MEMORY': '1G',
        }
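        # With profiling enabled and tracing disabled, Score-P produces a
        # call-path profile (scorep-*/profile.cubex); sampling is driven by
        # unwinding and the perf_cycles period taken from cycles_dict. The
        # profile is analyzed by the postrun commands below.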
        self.version_rpt = 'version.rpt'
        self.which_rpt = 'which.rpt'
        self.info_rpt = 'info.rpt'
        self.rpt = 'rpt'
        self.rpt_inclusive = '%s.inclusive' % self.rpt
        self.rpt_exclusive = '%s.exclusive' % self.rpt
        self.executable_opts = [
            f'-n {self.cubeside}', f'-s {self.steps}', '2>&1']
        self.prerun_cmds = [
            'module rm xalt',
            f'{self.tool} --version &> {self.version_rpt}',
            f'which {self.tool} &> {self.which_rpt}',
            'scorep-info config-summary &> %s' % self.info_rpt,
        ]
        cubetree = 'cube_calltree -m time -p -t 1'
        # -m metricname -- print out values for the metric <metricname>
        # -i            -- calculate inclusive values instead of exclusive
        # -t threshold  -- print out only call paths with a value larger
        #                  than <threshold>%
        # -p            -- display percent values
        self.postrun_cmds = [
            # work around a memory crash in scorep-score:
            '(scorep-score -r scorep-*/profile.cubex ;rm -f core*) > %s' \
            % self.rpt,
            '(%s    scorep-*/profile.cubex ;rm -f core*) >> %s' \
            % (cubetree, self.rpt_exclusive),
            '(%s -i scorep-*/profile.cubex ;rm -f core*) >> %s' \
            % (cubetree, self.rpt_inclusive),
        ]
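        # scorep-score summarizes the profile and estimates the event-trace
        # size (checked by the sanity block below); cube_calltree dumps the
        # exclusive and inclusive call trees into rpt.exclusive and
        # rpt.inclusive for later inspection.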
        # }}}

        # {{{ sanity
        self.sanity_patterns = sn.all([
            # check the job output:
            sn.assert_found(r'Total time for iteration\(0\)', self.stdout),
            # check the tool's version and configuration:
            sn.assert_true(sphsscorep.scorep_version(self)),
            sn.assert_true(sphsscorep.scorep_info_papi_support(self)),
            sn.assert_true(sphsscorep.scorep_info_perf_support(self)),
            sn.assert_true(sphsscorep.scorep_info_unwinding_support(self)),
            # check the summary report:
            sn.assert_found(r'Estimated aggregate size of event trace',
                            self.rpt)
        ])
        # }}}
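        # A minimal sketch (kept commented out; the report wording is assumed
        # to match the sanity pattern above) of how the trace-size estimate
        # could also be captured as a deferred value:
        # trace_size = sn.extractsingle(
        #     r'Estimated aggregate size of event trace:\s+(?P<size>.+)',
        #     self.rpt, 'size', str)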

        # {{{ performance
        # {{{ internal timers
        self.prerun_cmds += ['echo starttime=`date +%s`']
        self.postrun_cmds += ['echo stoptime=`date +%s`']
        # }}}

        # {{{ perf_patterns:
        self.perf_patterns = sn.evaluate(sphs.basic_perf_patterns(self))
        # tool
        self.perf_patterns.update({
            'scorep_elapsed': sphsscorep.scorep_elapsed(self),
            '%scorep_USR': sphsscorep.scorep_usr_pct(self),
            '%scorep_MPI': sphsscorep.scorep_mpi_pct(self),
            'scorep_top1': sphsscorep.scorep_top1_pct(self),
            '%scorep_Energy_exclusive':
            sphsscorep.scorep_exclusivepct_energy(self),
            '%scorep_Energy_inclusive':
            sphsscorep.scorep_inclusivepct_energy(self),
        })
        # }}}

        # {{{ reference:
        self.reference = sn.evaluate(sphs.basic_reference_scoped_d(self))
        # tool:
        self.reference['*:scorep_elapsed'] = (0, None, None, 's')
        self.reference['*:%scorep_USR'] = (0, None, None, '%')
        self.reference['*:%scorep_MPI'] = (0, None, None, '%')
        top1_name = sphsscorep.scorep_top1_name(self)
        # TODO: self.reference['*:scorep_top1'] = (0, None, None, top1_name)
        self.reference['*:scorep_top1'] = (0, None, None, '')
        self.reference['*:%scorep_Energy_exclusive'] = (0, None, None, '%')
        self.reference['*:%scorep_Energy_inclusive'] = (0, None, None, '%')
Example #14
    def __init__(self, mpi_task):
        # {{{ pe
        self.descr = 'Tool validation'
        self.valid_prog_environs = [
            'PrgEnv-gnu', 'PrgEnv-intel', 'PrgEnv-pgi', 'PrgEnv-cray'
        ]
        # self.valid_systems = ['daint:gpu', 'dom:gpu']
        self.valid_systems = ['*']
        self.maintainers = ['JG']
        self.tags = {'sph', 'hpctools', 'cpu'}
        # }}}

        # {{{ compile
        self.testname = 'sqpatch'
        self.tool = 'scorep'
        self.prebuild_cmds = ['module rm xalt', 'module list -t']
        tool_ver = '6.0'
        tc_ver = '20.08'
        self.tool_modules = {
            'PrgEnv-gnu': [f'Score-P/{tool_ver}-CrayGNU-{tc_ver}'],
            'PrgEnv-intel': [f'Score-P/{tool_ver}-CrayIntel-{tc_ver}'],
            'PrgEnv-cray': [f'Score-P/{tool_ver}-CrayCCE-{tc_ver}'],
            'PrgEnv-pgi': [f'Score-P/{tool_ver}-CrayPGI-{tc_ver}'],
        }
        self.prgenv_flags = {
            'PrgEnv-gnu': [
                '-I.', '-I./include', '-std=c++14', '-g', '-O3', '-DUSE_MPI',
                '-DNDEBUG'
            ],
            'PrgEnv-intel': [
                '-I.', '-I./include', '-std=c++14', '-g', '-O3', '-DUSE_MPI',
                '-DNDEBUG'
            ],
            'PrgEnv-cray': [
                '-I.', '-I./include', '-std=c++17', '-g', '-Ofast',
                '-DUSE_MPI', '-DNDEBUG'
            ],
            'PrgEnv-pgi': [
                '-I.', '-I./include', '-std=c++14', '-g', '-O3', '-DUSE_MPI',
                '-DNDEBUG'
            ],
        }
        self.build_system = 'SingleSource'
        self.build_system.cxx = 'scorep --mpp=mpi --nocompiler CC'
        self.sourcepath = f'{self.testname}.cpp'
        self.executable = f'./{self.testname}.exe'
        # }}}

        # {{{ run
        ompthread = 1
        self.num_tasks = mpi_task
        self.cubeside = cubeside_dict[mpi_task]
        self.steps = steps_dict[mpi_task]
        cycles = cycles_dict[mpi_task]
        self.name = \
            'sphexa_scorepS+T_{}_{:03d}mpi_{:03d}omp_{}n_{}steps_{}cycles'. \
            format(self.testname, mpi_task, ompthread, self.cubeside,
                   self.steps, cycles)
        self.num_tasks_per_node = 24
        # {{{ ht:
        # self.num_tasks_per_node = mpi_task if mpi_task < 36 else 36  # noht
        # self.use_multithreading = False  # noht
        # self.num_tasks_per_core = 1      # noht

        # self.num_tasks_per_node = mpi_task if mpi_task < 72 else 72
        # self.use_multithreading = True  # ht
        # self.num_tasks_per_core = 2     # ht
        # }}}
        self.num_cpus_per_task = ompthread
        self.num_tasks_per_core = 2
        self.use_multithreading = True
        self.exclusive = True
        self.time_limit = '10m'
        self.variables = {
            'CRAYPE_LINK_TYPE': 'dynamic',
            'OMP_NUM_THREADS': str(self.num_cpus_per_task),
            'SCOREP_ENABLE_PROFILING': 'false',
            'SCOREP_ENABLE_TRACING': 'true',
            'SCOREP_ENABLE_UNWINDING': 'true',
            'SCOREP_SAMPLING_EVENTS': 'perf_cycles@%s' % cycles,
            # 'SCOREP_SAMPLING_EVENTS': 'PAPI_TOT_CYC@1000000',
            # 'SCOREP_SAMPLING_EVENTS': 'PAPI_TOT_CYC@%s' % cycles,
            # export SCOREP_SAMPLING_EVENTS=PAPI_TOT_CYC@1000000
            # empty SCOREP_SAMPLING_EVENTS will profile mpi calls only:
            # ok: 'SCOREP_SAMPLING_EVENTS': '',
            # 'SCOREP_METRIC_PAPI': 'PAPI_TOT_INS,PAPI_TOT_CYC',
            # 'SCOREP_METRIC_RUSAGE': 'ru_maxrss',
            # 'SCOREP_TIMER': 'clock_gettime',
            # 'SCOREP_PROFILING_MAX_CALLPATH_DEPTH': '1',
            # 'SCOREP_VERBOSE': 'true',
            # ---
            # Needed to avoid "No free memory page available"
            # (the default SCOREP_TOTAL_MEMORY is 16384000 bytes):
            'SCOREP_TOTAL_MEMORY': '1G',
            # ---
            # add some metrics to exercise the sanity functions:
            'SCOREP_METRIC_RUSAGE': 'ru_maxrss',
            'SCOREP_METRIC_PAPI': 'PAPI_TOT_INS,PAPI_TOT_CYC',
        }
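        # The rusage and PAPI metrics recorded above are the inputs for the
        # max_rumaxrss_rk0 and max_ipc_rk0 performance patterns defined
        # further down.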
        self.version_rpt = 'version.rpt'
        self.which_rpt = 'which.rpt'
        self.info_rpt = 'scorep-info.rpt'
        self.rpt = 'rpt'
        self.rpt_inclusive = '%s.inclusive' % self.rpt
        self.rpt_exclusive = '%s.exclusive' % self.rpt
        self.executable_opts = [
            f'-n {self.cubeside}', f'-s {self.steps}', '2>&1'
        ]
        self.prerun_cmds = [
            'module rm xalt',
            f'{self.tool} --version &> {self.version_rpt}',
            f'which {self.tool} &> {self.which_rpt}',
            'scorep-info config-summary &> %s' % self.info_rpt,
        ]
        self.postrun_cmds = [
            # the trace cannot be inspected from the Vampir GUI here,
            # so dump it as text:
            'otf2-print scorep-*/traces.otf2 > %s' % self.rpt
        ]
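        # otf2-print converts the binary OTF2 trace into plain text so that
        # the sanity functions can count the program begin/end records (one
        # per MPI rank) and the rank-0 metric values used by the perf
        # patterns below can be extracted from the same report.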
        # }}}

        # {{{ sanity
        self.sanity_patterns = sn.all([
            # check the job output:
            sn.assert_found(r'Total time for iteration\(0\)', self.stdout),
            # check the tool's version and configuration:
            sn.assert_true(sphsscorep.scorep_version(self)),
            sn.assert_true(sphsscorep.scorep_info_papi_support(self)),
            sn.assert_true(sphsscorep.scorep_info_perf_support(self)),
            sn.assert_true(sphsscorep.scorep_info_unwinding_support(self)),
            # check the report:
            sn.assert_eq(sphsscorep.program_begin_count(self), self.num_tasks),
            sn.assert_eq(sphsscorep.program_end_count(self), self.num_tasks),
            # TODO: create derived metric (ipc) in cube
        ])
        # }}}

        # {{{ performance
        # {{{ internal timers
        self.prerun_cmds += ['echo starttime=`date +%s`']
        self.postrun_cmds += ['echo stoptime=`date +%s`']
        # }}}

        # {{{ perf_patterns:
        self.perf_patterns = sn.evaluate(sphs.basic_perf_patterns(self))
        # tool
        self.perf_patterns.update({
            'max_ipc_rk0':
            sphsscorep.ipc_rk0(self),
            'max_rumaxrss_rk0':
            sphsscorep.ru_maxrss_rk0(self),
        })
        # }}}

        # {{{ reference:
        self.reference = sn.evaluate(sphs.basic_reference_scoped_d(self))
        # tool:
        self.reference['*:max_ipc_rk0'] = (0, None, None, 'ins/cyc')
        self.reference['*:max_rumaxrss_rk0'] = (0, None, None, 'kilobytes')
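        # Here ipc stands for instructions per cycle, i.e. PAPI_TOT_INS
        # divided by PAPI_TOT_CYC on rank 0 (cf. the TODO above about
        # creating this derived metric directly in Cube), and ru_maxrss is
        # reported in kilobytes, hence the reference units.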