def test_env_load_restore(base_environ, env0):
    """Loading ``env0`` must apply its variables/modules; restoring the
    snapshot must bring back the pre-load environment exactly."""
    snapshot, _ = rt.loadenv(env0)

    # Every variable defined by `env0` must now be visible in the process
    # environment
    assert os.environ['_var0'] == 'val1'
    assert os.environ['_var1'] == 'val1'
    assert os.environ['_var2'] == 'val1'
    assert os.environ['_var3'] == 'val1'
    if test_util.has_sane_modules_system():
        assert_modules_loaded(env0.modules)

    assert rt.is_env_loaded(env0)
    snapshot.restore()

    # FIX: the result of this comparison was previously discarded (bare
    # `==` expression with no `assert`), so the check never ran
    assert base_environ == env.snapshot()
    assert os.environ['_var0'] == 'val0'
    if test_util.has_sane_modules_system():
        assert not rt.runtime().modules_system.is_module_loaded('testmod_foo')

    assert not rt.is_env_loaded(env0)
def modules_system():
    """Yield the runtime's modules system with the test modules directory
    added to its search path; the directory is removed again on teardown.

    Skips the requesting test when no usable modules system is configured.
    """
    if not test_util.has_sane_modules_system():
        pytest.skip('no modules system configured')

    system = rt.runtime().modules_system
    system.searchpath_add(test_util.TEST_MODULES)

    # Always add a base module; this is a workaround for the modules
    # environment's inconsistent behaviour, that starts with an empty
    # LOADEDMODULES variable and ends up removing it completely if all
    # present modules are removed.
    system.load_module('testmod_base')
    yield system
    system.searchpath_remove(test_util.TEST_MODULES)
def test_use_module_path(run_reframe, user_exec_ctx):
    """Appending a directory with ``--module-path=+<dir>`` must make its
    modules loadable via ``--module``."""
    if not test_util.has_sane_modules_system():
        pytest.skip('no modules system found')

    modpath = 'unittests/modules'
    retcode, out, err = run_reframe(
        more_options=[f'--module-path=+{modpath}', '--module=testmod_foo'],
        config_file=test_util.USER_CONFIG_FILE,
        action='run',
        system=rt.runtime().system.name
    )

    # Neither stream may contain a crash trace
    for stream in (out, err):
        assert 'Traceback' not in stream

    assert "could not load module 'testmod_foo' correctly" not in out
    assert retcode == 0
def test_overwrite_module_path(run_reframe, user_exec_ctx):
    """Replacing the module path with ``--module-path=<dir>`` must still
    allow loading modules from that directory."""
    if not test_util.has_sane_modules_system():
        pytest.skip('no modules system found')

    modpath = 'unittests/modules'
    # Preserve any pre-existing MODULEPATH after our test directory
    with contextlib.suppress(KeyError):
        modpath = f'{modpath}:{os.environ["MODULEPATH"]}'

    retcode, out, err = run_reframe(
        more_options=[f'--module-path={modpath}', '--module=testmod_foo'],
        config_file=test_util.USER_CONFIG_FILE,
        action='run',
        system=rt.runtime().system.name
    )

    # Neither stream may contain a crash trace
    for stream in (out, err):
        assert 'Traceback' not in stream

    assert "could not load module 'testmod_foo' correctly" not in out
    assert retcode == 0
def test_unload_module(run_reframe, user_exec_ctx):
    """Exercise the ``-u``/``--unload-module`` option.

    This test is mostly for ensuring coverage. `run_reframe()` restores
    the current environment, so it is not easy to verify that the modules
    are indeed unloaded. However, this functionality is tested elsewhere
    more exhaustively.
    """
    ms = rt.runtime().modules_system
    if not test_util.has_sane_modules_system():
        pytest.skip('no modules system found')

    with rt.module_use('unittests/modules'):
        ms.load_module('testmod_foo')
        try:
            returncode, stdout, stderr = run_reframe(
                more_options=['-u testmod_foo'],
                action='list'
            )
        finally:
            # FIX: always unload the module, even if `run_reframe()` raises,
            # so a failure here does not leak a loaded module into the
            # environment seen by subsequent tests
            ms.unload_module('testmod_foo')

    assert stdout != ''
    assert 'Traceback' not in stdout
    assert 'Traceback' not in stderr
    assert returncode == 0
def test_emit_loadenv_commands_ignore_confict(base_environ, make_exec_ctx,
                                              env0):
    """With ``resolve_module_conflicts`` disabled, the emitted load
    commands must ignore the already-loaded conflicting module.

    NOTE(review): 'confict' in the name looks like a typo for 'conflict';
    kept as-is since the test id is part of the suite's public surface.
    """
    if not test_util.has_sane_modules_system():
        pytest.skip('no modules system configured')

    options = {'general/resolve_module_conflicts': False}
    if test_util.USER_CONFIG_FILE:
        make_exec_ctx(test_util.USER_CONFIG_FILE, test_util.USER_SYSTEM,
                      options=options)
    else:
        make_exec_ctx(options=options)

    # Load a conflicting module
    ms = rt.runtime().modules_system
    with ms.change_module_path(test_util.TEST_MODULES):
        ms.load_module('testmod_bar')

    load_foo_cmd = ms.emit_load_commands('testmod_foo')[0]
    expected_commands = [
        load_foo_cmd,
        'export _var0=val1',
        'export _var2=$_var0',
        'export _var3=${_var1}',
    ]
    assert expected_commands == rt.emit_loadenv_commands(env0)