def _remote_detect(part):
    '''Detect the processor topology of the remote partition ``part``.

    Copies ReFrame into a remote working directory, submits a detection job
    through the partition's scheduler/launcher and parses the ``topo.json``
    file it produces.

    :returns: a dictionary with the topology information, or an empty
        dictionary if anything goes wrong (this is best-effort: all
        exceptions are caught and logged as warnings).
    '''

    def _emit_script(job, env):
        # Build the job script: bootstrap ReFrame first, then run the
        # topology detection through the partition's launcher so that it
        # executes on an actual compute node.
        launcher_cmd = job.launcher.run_command(job)
        commands = [
            f'./bootstrap.sh',
            f'{launcher_cmd} ./bin/reframe --detect-host-topology=topo.json'
        ]
        # trap_errors=True: abort the script on the first failing command
        job.prepare(commands, env, trap_errors=True)

    getlogger().info(
        f'Detecting topology of remote partition {part.fullname!r}: '
        f'this may take some time...'
    )
    topo_info = {}
    try:
        # Remote working directory prefix is user-configurable
        prefix = runtime.runtime().get_option('general/0/remote_workdir')
        with _copy_reframe(prefix) as dirname:
            with osext.change_dir(dirname):
                job = Job.create(part.scheduler, part.launcher_type(),
                                 name='rfm-detect-job',
                                 sched_access=part.access)
                _emit_script(job, [part.local_env])
                getlogger().debug('submitting detection script')
                _log_contents(job.script_filename)
                job.submit()
                job.wait()
                getlogger().debug('job finished')
                _log_contents(job.stdout)
                _log_contents(job.stderr)
                # The detection run writes its findings in 'topo.json'
                topo_info = json.loads(_contents('topo.json'))
    except Exception as e:
        # Best-effort: never fail the caller because remote detection broke
        getlogger().warning(f'failed to retrieve remote processor info: {e}')

    return topo_info
def _merge_files(self, job): with osext.change_dir(job.workdir): out_glob = glob.glob(job.stdout + '_*') err_glob = glob.glob(job.stderr + '_*') self.log(f'merging job array output files: {", ".join(out_glob)}') osext.concat_files(job.stdout, *out_glob, overwrite=True) self.log(f'merging job array error files: {", ".join(err_glob)}') osext.concat_files(job.stderr, *err_glob, overwrite=True)
def test_exception_propagation(tmpdir):
    '''The context manager must propagate exceptions and still restore
    the original working directory.'''
    prev_dir = os.getcwd()
    with pytest.raises(RuntimeError):
        with osext.change_dir(tmpdir):
            raise RuntimeError

    assert os.getcwd() == prev_dir
def test_spack(environ, tmp_path):
    '''Spack build system with an explicit environment and install options.'''
    spack = bs.Spack()
    spack.environment = 'spack_env'
    spack.install_opts = ['-j 10']
    expected = [f'spack -e {spack.environment} install -j 10']
    with osext.change_dir(tmp_path):
        assert expected == spack.emit_build_commands(environ)
        assert spack.prepare_cmds() == []
def read_proc_topo(self): '''Import the processor's topology from the reference file. This hook inserts the following attributes based on the processor's topology: - cpu_set: set containing all the cpu IDs - num_cpus - num_cpus_per_core - num_numa_nodes - num_sockets - numa_nodes: dictionary containing the cpu sets for each numa node. The keys of the dictionary are simply the ID of the numa node. - sockets: dictionary containing the cpu sets for each socket. The keys of the dictionary are simply the socket IDs. This hook requires the reference file, so the earliest it can run is after the compilation stage, once the required files have been copied over to the stage directory. ''' cp = self.current_partition.fullname with osext.change_dir(self.stagedir): with open(self.topo_file, 'r') as topo: lscpu = json.load(topo)['cpus'] # Build the cpu set self.cpu_set = {int(x['cpu']) for x in lscpu} self.num_cpus = len(self.cpu_set) # Build the numa sets self.num_numa_nodes = len({int(x['node']) for x in lscpu}) self.num_cpus_per_core = int( self.num_cpus/len({int(x['core']) for x in lscpu}) ) self.numa_nodes = [] for i in range(self.num_numa_nodes): self.numa_nodes.append({ int(y['cpu']) for y in [ x for x in lscpu if int(x['node']) == i ] }) # Build the socket sets self.num_sockets = len({int(x['socket']) for x in lscpu}) self.sockets = [] for i in range(self.num_sockets): self.sockets.append({ int(y['cpu']) for y in [ x for x in lscpu if int(x['socket']) == i ] }) # Store the lscpu output self._lscpu = lscpu
def test_spack_no_env(environ, tmp_path):
    '''When no environment is set, Spack creates a default one.'''
    spack = bs.Spack()
    expected = [
        'spack env create -d rfm_spack_env',
        'spack -e rfm_spack_env config add '
        '"config:install_tree:root:opt/spack"',
        'spack -e rfm_spack_env install'
    ]
    with osext.change_dir(tmp_path):
        assert expected == spack.emit_build_commands(environ)
        assert spack.environment == 'rfm_spack_env'
def parse_output(self): '''Extract the data from the affinity tool.''' re_aff_cpus = r'CPU affinity: \[\s+(?P<cpus>[\d+\s+]+)\]' def parse_cpus(x): return sorted([int(xi) for xi in x.split()]) with osext.change_dir(self.stagedir): self.aff_cpus = sn.extractall(re_aff_cpus, self.stdout, 'cpus', parse_cpus).evaluate()
def test_import_from_file_load_relative():
    '''Relative file paths must resolve against the current directory.'''
    with osext.change_dir('reframe'):
        # Load a module from a directory up
        mod = util.import_module_from_file('../reframe/__init__.py')
        assert mod.VERSION == reframe.VERSION
        assert mod.__name__ == 'reframe'
        assert sys.modules.get('reframe') is mod

        # Load a module from the current directory
        mod = util.import_module_from_file('utility/osext.py')
        assert mod.__name__ == 'reframe.utility.osext'
        assert sys.modules.get('reframe.utility.osext') is mod
def test_spack_no_env(environ, tmp_path):
    '''When no environment is set, Spack creates and activates a default
    one.'''
    spack = bs.Spack()
    expected = [
        '. "$(spack location --spack-root)/share/spack/setup-env.sh"',
        'spack env create -d rfm_spack_env',
        'spack env activate -V -d rfm_spack_env',
        'spack config add "config:install_tree:root:opt/spack"',
        'spack install'
    ]
    with osext.change_dir(tmp_path):
        assert expected == spack.emit_build_commands(environ)
        assert spack.environment == 'rfm_spack_env'
def test_easybuild(environ, tmp_path):
    '''EasyBuild build system with easyconfigs and extra options.'''
    eb = bs.EasyBuild()
    eb.easyconfigs = ['ec1.eb', 'ec2.eb']
    eb.options = ['-o1', '-o2']
    prefix = f'{tmp_path}/easybuild'
    with osext.change_dir(tmp_path):
        assert eb.emit_build_commands(environ) == [
            f'export EASYBUILD_BUILDPATH={prefix}/build',
            f'export EASYBUILD_INSTALLPATH={prefix}',
            f'export EASYBUILD_PREFIX={prefix}',
            f'export EASYBUILD_SOURCEPATH={prefix}',
            'eb ec1.eb ec2.eb -o1 -o2'
        ]
def test_spack_with_spec(environ, tmp_path):
    '''Spack build system with explicit specs added to the environment.'''
    spack = bs.Spack()
    spack.environment = 'spack_env'
    spack.specs = ['spec1@version1', 'spec2@version2']
    all_specs = ' '.join(spack.specs)
    with osext.change_dir(tmp_path):
        assert spack.emit_build_commands(environ) == [
            f'spack -e {spack.environment} add {all_specs}',
            f'spack -e {spack.environment} install'
        ]
        assert spack.prepare_cmds() == [
            f'eval `spack -e {spack.environment} load --sh {all_specs}`'
        ]
def test_spack(environ, tmp_path):
    '''Spack build system with an explicit environment and install
    options.'''
    spack = bs.Spack()
    spack.environment = 'spack_env'
    spack.install_opts = ['-j 10']
    activation_cmds = [
        '. "$(spack location --spack-root)/share/spack/setup-env.sh"',
        f'spack env activate -V -d {spack.environment}',
    ]
    with osext.change_dir(tmp_path):
        assert spack.emit_build_commands(environ) == activation_cmds + [
            'spack install -j 10'
        ]
        assert spack.prepare_cmds() == activation_cmds
def test_easybuild_with_packaging(environ, tmp_path):
    '''EasyBuild build system with packaging options enabled.'''
    eb = bs.EasyBuild()
    eb.easyconfigs = ['ec1.eb', 'ec2.eb']
    eb.options = ['-o1', '-o2']
    eb.emit_package = True
    eb.package_opts = {'type': 'rpm', 'tool-options': "'-o1 -o2'"}
    prefix = f'{tmp_path}/easybuild'
    with osext.change_dir(tmp_path):
        assert eb.emit_build_commands(environ) == [
            f'export EASYBUILD_BUILDPATH={prefix}/build',
            f'export EASYBUILD_INSTALLPATH={prefix}',
            f'export EASYBUILD_PREFIX={prefix}',
            f'export EASYBUILD_SOURCEPATH={prefix}',
            'eb ec1.eb ec2.eb -o1 -o2 --package --package-type=rpm '
            "--package-tool-options='-o1 -o2'"
        ]
def test_concat_files(tmpdir):
    '''Concatenating two files joins their contents with newlines.'''
    with osext.change_dir(tmpdir):
        sources = ['in1.txt', 'in2.txt']
        for name, text in zip(sources, ('Hello1', 'Hello2')):
            with open(name, 'w') as fp:
                fp.write(text)

        merged = 'out.txt'
        osext.concat_files(merged, *sources, overwrite=True)
        with open(merged) as fp:
            assert fp.read() == 'Hello1\nHello2\n'
def _read_timestamps(tasks):
    '''Read the timestamps and sort them to permit simple concurrency
    tests.'''
    from reframe.utility.sanity import evaluate

    begin_stamps, end_stamps = [], []
    for task in tasks:
        # Each task writes two timestamps (begin, end) on the first two
        # lines of its stdout.
        with osext.change_dir(task.check.stagedir):
            with open(evaluate(task.check.stdout), 'r') as fp:
                begin_stamps.append(float(fp.readline().strip()))
                end_stamps.append(float(fp.readline().strip()))

    return sorted(begin_stamps), sorted(end_stamps)
def test_spack_with_spec(environ, tmp_path):
    '''Spack build system with explicit specs added to the environment.'''
    spack = bs.Spack()
    spack.environment = 'spack_env'
    spack.specs = ['spec1@version1', 'spec2@version2']
    all_specs = ' '.join(spack.specs)
    activation_cmds = [
        '. "$(spack location --spack-root)/share/spack/setup-env.sh"',
        f'spack env activate -V -d {spack.environment}',
    ]
    with osext.change_dir(tmp_path):
        assert spack.emit_build_commands(environ) == activation_cmds + [
            f'spack add {all_specs}',
            'spack install'
        ]
        assert spack.prepare_cmds() == activation_cmds + [
            f'spack load {all_specs}',
        ]
def report_slow_nodes(self): '''Report the base perf metrics and also all the slow nodes.''' # Only report the nodes that don't meet the perf reference with osext.change_dir(self.stagedir): key = f'{self.current_partition.fullname}:min_perf' if key in self.reference: regex = r'\[(\S+)\] GPU\s+\d\(OK\): (\d+) GF/s' nids = set(sn.extractall(regex, self.stdout, 1)) # Get the references ref, lt, ut, *_ = self.reference[key] # Flag the slow nodes for nid in nids: try: node_perf = self.min_perf(nid) val = node_perf.evaluate(cache=True) sn.assert_reference(val, ref, lt, ut).evaluate() except SanityError: self.perf_variables[nid] = node_perf
def test_import_from_file_load_directory_relative():
    '''Loading a package from a relative directory path must register it
    under its canonical name.'''
    with osext.change_dir('reframe'):
        mod = util.import_module_from_file('../reframe')
        assert mod.VERSION == reframe.VERSION
        assert mod.__name__ == 'reframe'
        assert sys.modules.get('reframe') is mod
def test_change_dir_working(tmpdir):
    '''The context manager changes into the target directory and restores
    the previous one on exit.'''
    starting_dir = os.getcwd()
    with osext.change_dir(tmpdir):
        assert os.getcwd() == tmpdir

    assert os.getcwd() == starting_dir