def test_path_to_top_of_module_tree_hierarchical_mns(self):
    """Test function to determine path to top of the module tree for a hierarchical module naming scheme."""
    modtool = modules_tool()
    ecs_dir = os.path.join(os.path.dirname(__file__), 'easyconfigs')
    all_stops = [x[0] for x in EasyBlock.get_steps()]
    build_options = {
        'check_osdeps': False,
        'robot_path': [ecs_dir],
        'valid_stops': all_stops,
        'validate': False,
    }
    # switch to a hierarchical module naming scheme *before* reinitializing the configuration
    os.environ['EASYBUILD_MODULE_NAMING_SCHEME'] = 'HierarchicalMNS'
    init_config(build_options=build_options)
    self.setup_hierarchical_modules()
    # fresh modules tool instance, obtained after the module hierarchy was set up
    modtool = modules_tool()
    mod_prefix = os.path.join(self.test_installpath, 'modules', 'all')
    init_modpaths = [os.path.join(mod_prefix, 'Core')]

    # modules that live in the top-level 'Core' path have an empty path to the top
    deps = ['GCC/4.7.2', 'OpenMPI/1.6.4', 'FFTW/3.3.3', 'OpenBLAS/0.2.6-LAPACK-3.4.2',
            'ScaLAPACK/2.0.2-OpenBLAS-0.2.6-LAPACK-3.4.2']
    path = modtool.path_to_top_of_module_tree(init_modpaths, 'goolf/1.4.10', os.path.join(mod_prefix, 'Core'), deps)
    self.assertEqual(path, [])
    path = modtool.path_to_top_of_module_tree(init_modpaths, 'GCC/4.7.2', os.path.join(mod_prefix, 'Core'), [])
    self.assertEqual(path, [])

    # a module in the 'Compiler' level is one step (via GCC) away from Core
    full_mod_subdir = os.path.join(mod_prefix, 'Compiler', 'GCC', '4.7.2')
    deps = ['GCC/4.7.2', 'hwloc/1.6.2']
    path = modtool.path_to_top_of_module_tree(init_modpaths, 'OpenMPI/1.6.4', full_mod_subdir, deps)
    self.assertEqual(path, ['GCC/4.7.2'])

    # a module in the 'MPI' level is two steps away: first OpenMPI, then GCC
    full_mod_subdir = os.path.join(mod_prefix, 'MPI', 'GCC', '4.7.2', 'OpenMPI', '1.6.4')
    deps = ['GCC/4.7.2', 'OpenMPI/1.6.4']
    path = modtool.path_to_top_of_module_tree(init_modpaths, 'FFTW/3.3.3', full_mod_subdir, deps)
    self.assertEqual(path, ['OpenMPI/1.6.4', 'GCC/4.7.2'])
def test_optimization_flags(self):
    """Test whether optimization flags are being set correctly."""
    flag_vars = ['CFLAGS', 'CXXFLAGS', 'FFLAGS', 'F90FLAGS']

    # check default optimization flag (e.g. -O2)
    tc = self.get_toolchain("goalf", version="1.1.0-no-OFED")
    tc.set_options({})
    tc.prepare()
    for var in flag_vars:
        flags = tc.get_variable(var)
        self.assertTrue(tc.COMPILER_SHARED_OPTION_MAP['defaultopt'] in flags)

    # check other optimization flags
    for opt in ['noopt', 'lowopt', 'opt']:
        for enable in [True, False]:
            # fresh toolchain per (option, enable) combination, so flags set by a
            # previous prepare() cannot linger (same pattern as test_precision_flags)
            tc = self.get_toolchain("goalf", version="1.1.0-no-OFED")
            tc.set_options({opt: enable})
            tc.prepare()
            for var in flag_vars:
                flags = tc.get_variable(var)
                if enable:
                    self.assertTrue(tc.COMPILER_SHARED_OPTION_MAP[opt] in flags)
                else:
                    # bug fix: a *disabled* option must not inject its flag;
                    # previously both branches asserted presence, so enable=False tested nothing
                    self.assertTrue(tc.COMPILER_SHARED_OPTION_MAP[opt] not in flags)
            modules.modules_tool().purge()
def tearDown(self):
    """cleanup"""
    super(ModulesTest, self).tearDown()
    # put the original $MODULEPATH entries back in place
    restored_modulepath = os.pathsep.join(self.orig_modulepaths)
    os.environ['MODULEPATH'] = restored_modulepath
    # creating a fresh modules tool instance runs 'module use' on each module path again
    modules_tool()
def test_optimization_flags_combos(self):
    """Test whether combining optimization levels works as expected."""
    flag_vars = ['CFLAGS', 'CXXFLAGS', 'FFLAGS', 'F90FLAGS']

    # combining optimization flags doesn't make much sense,
    # so the lowest optimization level should always be picked
    scenarios = [
        ({'lowopt': True, 'opt': True}, 'lowopt'),
        ({'noopt': True, 'lowopt': True}, 'noopt'),
        ({'noopt': True, 'lowopt': True, 'opt': True}, 'noopt'),
    ]
    for idx, (opts, lowest) in enumerate(scenarios):
        tc = self.get_toolchain("goalf", version="1.1.0-no-OFED")
        tc.set_options(opts)
        tc.prepare()
        expected = '-%s' % tc.COMPILER_SHARED_OPTION_MAP[lowest]
        for var in flag_vars:
            self.assertTrue(expected in tc.get_variable(var))
        # original code purges between scenarios, but not after the last one
        if idx < len(scenarios) - 1:
            modules.modules_tool().purge()
def test_precision_flags(self):
    """Test whether precision flags are being set correctly."""
    flag_vars = ['CFLAGS', 'CXXFLAGS', 'FFLAGS', 'F90FLAGS']

    def joined_flags(tc, key):
        # precision options map to a sequence of flags, which show up space-joined
        return ' '.join(['-%s' % f for f in tc.COMPILER_UNIQUE_OPTION_MAP[key]])

    # default precision flags must be present when no option is set
    tc = self.get_toolchain("goalf", version="1.1.0-no-OFED")
    tc.prepare()
    for var in flag_vars:
        self.assertTrue(joined_flags(tc, 'defaultprec') in tc.get_variable(var))

    # every precision option injects its flags when enabled and omits them when disabled
    for opt in ['strict', 'precise', 'loose', 'veryloose']:
        for enable in (True, False):
            tc = self.get_toolchain("goalf", version="1.1.0-no-OFED")
            tc.set_options({opt: enable})
            tc.prepare()
            val = joined_flags(tc, opt)
            for var in flag_vars:
                flags = tc.get_variable(var)
                self.assertTrue((val in flags) if enable else (val not in flags))
            modules.modules_tool().purge()
def tearDown(self):
    """cleanup"""
    # return to the directory we started from
    os.chdir(self.cwd)
    # restore original $MODULEPATH
    orig_mp = os.pathsep.join(self.orig_modulepaths)
    os.environ['MODULEPATH'] = orig_mp
    # reinitializing the modules tool triggers 'module use' for every module path
    modules_tool()
    # restore the complete original environment
    modify_env(os.environ, self.orig_environ)
def tearDown(self):
    """Cleanup."""
    # drop all loaded modules before touching $MODULEPATH
    modtool = modules.modules_tool()
    modtool.purge()
    os.environ['MODULEPATH'] = self.orig_modpath
    # a fresh modules tool instance picks up the restored $MODULEPATH
    modules.modules_tool()
def test_optimization_flags_combos(self):
    """Test whether combining optimization levels works as expected."""
    flag_vars = ['CFLAGS', 'CXXFLAGS', 'FFLAGS', 'F90FLAGS']

    def check_lowest(opts, lowest_opt):
        # prepare a toolchain with the given option combo and verify that the flag
        # for the lowest requested optimization level ends up in every flag variable
        tc = self.get_toolchain("goalf", version="1.1.0-no-OFED")
        tc.set_options(opts)
        tc.prepare()
        wanted = '-%s' % tc.COMPILER_SHARED_OPTION_MAP[lowest_opt]
        for var in flag_vars:
            self.assertTrue(wanted in tc.get_variable(var))

    # combining optimization flags doesn't make much sense;
    # the lowest optimization level should always win
    check_lowest({'lowopt': True, 'opt': True}, 'lowopt')
    modules.modules_tool().purge()
    check_lowest({'noopt': True, 'lowopt': True}, 'noopt')
    modules.modules_tool().purge()
    check_lowest({'noopt': True, 'lowopt': True, 'opt': True}, 'noopt')
def test_misc_flags_unique(self):
    """Test whether unique compiler flags are set correctly."""
    flag_vars = ['CFLAGS', 'CXXFLAGS', 'FFLAGS', 'F90FLAGS']

    # setting an option should toggle exactly the corresponding (unique) flag
    for opt in ['unroll', 'optarch', 'openmp']:
        for enable in (True, False):
            tc = self.get_toolchain("goalf", version="1.1.0-no-OFED")
            tc.set_options({opt: enable})
            tc.prepare()
            # 'optarch' maps to an architecture-specific flag, the others to a fixed one
            expected = '-%s' % (tc.COMPILER_OPTIMAL_ARCHITECTURE_OPTION[tc.arch] if opt == 'optarch'
                                else tc.COMPILER_UNIQUE_OPTION_MAP[opt])
            for var in flag_vars:
                flags = tc.get_variable(var)
                if enable:
                    self.assertTrue(expected in flags, "%s: True means %s in %s" % (opt, expected, flags))
                else:
                    self.assertTrue(expected not in flags, "%s: False means no %s in %s" % (opt, expected, flags))
            modules.modules_tool().purge()
def test_prepare_deps_external(self):
    """Test preparing for a toolchain when dependencies and external modules are involved."""
    deps = [
        {
            'name': 'OpenMPI',
            'version': '1.6.4',
            'full_mod_name': 'OpenMPI/1.6.4-GCC-4.6.4',
            'short_mod_name': 'OpenMPI/1.6.4-GCC-4.6.4',
            'external_module': False,
            'external_module_metadata': {},
        },
        # no metadata available
        {
            'name': None,
            'version': None,
            'full_mod_name': 'toy/0.0',
            'short_mod_name': 'toy/0.0',
            'external_module': True,
            'external_module_metadata': {},
        }
    ]
    tc = self.get_toolchain('GCC', version='4.6.4')
    tc.add_dependencies(deps)
    tc.prepare()
    mods = ['GCC/4.6.4', 'hwloc/1.6.2-GCC-4.6.4', 'OpenMPI/1.6.4-GCC-4.6.4', 'toy/0.0']
    # bug fix: assertTrue(x, mods) treated 'mods' as a failure *message* and compared
    # nothing; assertEqual actually verifies the list of loaded modules
    self.assertEqual([m['mod_name'] for m in modules_tool().list()], mods)
    self.assertTrue(os.environ['EBROOTTOY'].endswith('software/toy/0.0'))
    self.assertEqual(os.environ['EBVERSIONTOY'], '0.0')
    self.assertFalse('EBROOTFOOBAR' in os.environ)

    # with metadata: root/version env vars are derived from the metadata + prefix variable
    deps[1] = {
        'full_mod_name': 'toy/0.0',
        'short_mod_name': 'toy/0.0',
        'external_module': True,
        'external_module_metadata': {
            'name': ['toy', 'foobar'],
            'version': ['1.2.3', '4.5'],
            'prefix': 'FOOBAR_PREFIX',
        }
    }
    tc = self.get_toolchain('GCC', version='4.6.4')
    tc.add_dependencies(deps)
    os.environ['FOOBAR_PREFIX'] = '/foo/bar'
    tc.prepare()
    # same list of loaded modules is expected in both scenarios
    self.assertEqual([m['mod_name'] for m in modules_tool().list()], mods)
    self.assertEqual(os.environ['EBROOTTOY'], '/foo/bar')
    self.assertEqual(os.environ['EBVERSIONTOY'], '1.2.3')
    self.assertEqual(os.environ['EBROOTFOOBAR'], '/foo/bar')
    self.assertEqual(os.environ['EBVERSIONFOOBAR'], '4.5')
    self.assertEqual(modules.get_software_root('foobar'), '/foo/bar')
    self.assertEqual(modules.get_software_version('toy'), '1.2.3')
def test_optimization_flags(self):
    """Test whether optimization flags are being set correctly."""
    flag_vars = ['CFLAGS', 'CXXFLAGS', 'FFLAGS', 'F90FLAGS']

    # check default optimization flag (e.g. -O2)
    tc = self.get_toolchain("goalf", version="1.1.0-no-OFED")
    tc.set_options({})
    tc.prepare()
    for var in flag_vars:
        flags = tc.get_variable(var)
        self.assertTrue(tc.COMPILER_SHARED_OPTION_MAP['defaultopt'] in flags)

    # check other optimization flags
    for opt in ['noopt', 'lowopt', 'opt']:
        for enable in [True, False]:
            # fresh toolchain per (option, enable) combination, so flags set by a
            # previous prepare() cannot linger (same pattern as test_precision_flags)
            tc = self.get_toolchain("goalf", version="1.1.0-no-OFED")
            tc.set_options({opt: enable})
            tc.prepare()
            for var in flag_vars:
                flags = tc.get_variable(var)
                if enable:
                    self.assertTrue(tc.COMPILER_SHARED_OPTION_MAP[opt] in flags)
                else:
                    # bug fix: a *disabled* option must not inject its flag;
                    # previously both branches asserted presence, so enable=False tested nothing
                    self.assertTrue(tc.COMPILER_SHARED_OPTION_MAP[opt] not in flags)
            modules.modules_tool().purge()
def test_override_optarch(self):
    """Test whether overriding the optarch flag works."""
    flag_vars = ['CFLAGS', 'CXXFLAGS', 'FFLAGS', 'F90FLAGS']
    for custom_optarch in ['march=lovelylovelysandybridge', None]:
        init_config(build_options={'optarch': custom_optarch})
        for enable in [True, False]:
            tc = self.get_toolchain("goalf", version="1.1.0-no-OFED")
            tc.set_options({'optarch': enable})
            tc.prepare()

            if custom_optarch is None:
                # no override configured: expect the default optarch flag
                # (checked without a leading dash, unlike the override value)
                flag = tc.COMPILER_OPTIMAL_ARCHITECTURE_OPTION[tc.arch]
            else:
                flag = '-%s' % custom_optarch

            for var in flag_vars:
                flags = tc.get_variable(var)
                if enable:
                    self.assertTrue(flag in flags, "optarch: True means %s in %s" % (flag, flags))
                else:
                    self.assertFalse(flag in flags, "optarch: False means no %s in %s" % (flag, flags))
            modules.modules_tool().purge()
def test_precision_flags(self):
    """Test whether precision flags are being set correctly."""
    flag_vars = ['CFLAGS', 'CXXFLAGS', 'FFLAGS', 'F90FLAGS']

    # check default precision flag
    tc = self.get_toolchain("goalf", version="1.1.0-no-OFED")
    tc.prepare()
    default_val = ' '.join(['-%s' % f for f in tc.COMPILER_UNIQUE_OPTION_MAP['defaultprec']])
    for var in flag_vars:
        self.assertTrue(default_val in tc.get_variable(var))

    # check other precision flags
    for prec_opt in ['strict', 'precise', 'loose', 'veryloose']:
        for enabled in [True, False]:
            tc = self.get_toolchain("goalf", version="1.1.0-no-OFED")
            tc.set_options({prec_opt: enabled})
            tc.prepare()
            prec_val = ' '.join(['-%s' % f for f in tc.COMPILER_UNIQUE_OPTION_MAP[prec_opt]])
            for var in flag_vars:
                cur_flags = tc.get_variable(var)
                if enabled:
                    self.assertTrue(prec_val in cur_flags)
                else:
                    self.assertTrue(prec_val not in cur_flags)
            modules.modules_tool().purge()
def tearDown(self):
    """cleanup"""
    # restore original $MODULEPATH entries
    mp = os.pathsep.join(self.orig_modulepaths)
    os.environ['MODULEPATH'] = mp
    # reinitializing the modules tool runs 'module use' on each module path again
    modules_tool()
    # restore (full) original environment
    modify_env(os.environ, self.orig_environ)
def test_ictce_toolchain(self):
    """Test for ictce toolchain."""
    # sandboxed imkl module; tmpdir + original module file text are returned so the
    # cleanup at the end can restore everything
    tmpdir, imkl_module_path, imkl_module_txt = self.setup_sandbox_for_intel_fftw()

    # plain ictce: Intel compilers are used directly
    tc = self.get_toolchain("ictce", version="4.1.13")
    tc.prepare()
    self.assertEqual(tc.get_variable('CC'), 'icc')
    self.assertEqual(tc.get_variable('CXX'), 'icpc')
    self.assertEqual(tc.get_variable('F77'), 'ifort')
    self.assertEqual(tc.get_variable('F90'), 'ifort')
    self.assertEqual(tc.get_variable('FC'), 'ifort')
    modules.modules_tool().purge()

    # with 'usempi': MPI compiler wrappers for both the C*/F* and MPI* variables
    tc = self.get_toolchain("ictce", version="4.1.13")
    opts = {'usempi': True}
    tc.set_options(opts)
    tc.prepare()
    self.assertEqual(tc.get_variable('CC'), 'mpicc')
    self.assertEqual(tc.get_variable('CXX'), 'mpicxx')
    self.assertEqual(tc.get_variable('F77'), 'mpif77')
    self.assertEqual(tc.get_variable('F90'), 'mpif90')
    self.assertEqual(tc.get_variable('FC'), 'mpif90')
    self.assertEqual(tc.get_variable('MPICC'), 'mpicc')
    self.assertEqual(tc.get_variable('MPICXX'), 'mpicxx')
    self.assertEqual(tc.get_variable('MPIF77'), 'mpif77')
    self.assertEqual(tc.get_variable('MPIF90'), 'mpif90')
    self.assertEqual(tc.get_variable('MPIFC'), 'mpif90')
    modules.modules_tool().purge()

    # with 'usempi' + 'openmp': '-mt_mpi' must appear in all compiler flag variables,
    # while the MPI wrappers remain in use
    tc = self.get_toolchain("ictce", version="4.1.13")
    opts = {'usempi': True, 'openmp': True}
    tc.set_options(opts)
    tc.prepare()
    self.assertTrue('-mt_mpi' in tc.get_variable('CFLAGS'))
    self.assertTrue('-mt_mpi' in tc.get_variable('CXXFLAGS'))
    self.assertTrue('-mt_mpi' in tc.get_variable('FCFLAGS'))
    self.assertTrue('-mt_mpi' in tc.get_variable('FFLAGS'))
    self.assertTrue('-mt_mpi' in tc.get_variable('F90FLAGS'))
    self.assertEqual(tc.get_variable('CC'), 'mpicc')
    self.assertEqual(tc.get_variable('CXX'), 'mpicxx')
    self.assertEqual(tc.get_variable('F77'), 'mpif77')
    self.assertEqual(tc.get_variable('F90'), 'mpif90')
    self.assertEqual(tc.get_variable('FC'), 'mpif90')
    self.assertEqual(tc.get_variable('MPICC'), 'mpicc')
    self.assertEqual(tc.get_variable('MPICXX'), 'mpicxx')
    self.assertEqual(tc.get_variable('MPIF77'), 'mpif77')
    self.assertEqual(tc.get_variable('MPIF90'), 'mpif90')
    self.assertEqual(tc.get_variable('MPIFC'), 'mpif90')

    # cleanup: remove sandbox dir, restore original imkl module file
    shutil.rmtree(tmpdir)
    write_file(imkl_module_path, imkl_module_txt)
def setUp(self):
    """Set up everything for a unit test."""
    super(ToolchainTest, self).setUp()
    # begin with a clean slate: no modules loaded
    modules.modules_tool().purge()
    # remember the current $MODULEPATH so tearDown can restore it,
    # then point $MODULEPATH at the modules shipped with the test suite
    self.orig_modpath = os.environ.get('MODULEPATH', '')
    test_mods_path = os.path.join('test', 'framework', 'modules')
    os.environ['MODULEPATH'] = find_full_path(test_mods_path)
def tearDown(self):
    """Cleanup."""
    # purge loaded modules first, while the test $MODULEPATH is still active
    modules.modules_tool().purge()
    super(ToolchainTest, self).tearDown()
    # put the original $MODULEPATH back, then reinitialize the modules tool
    os.environ['MODULEPATH'] = self.orig_modpath
    modules.modules_tool()
def setUp(self):
    """set up everything for a unit test."""
    super(ModulesTest, self).setUp()
    # remember the $MODULEPATH entries so tearDown can restore them
    mp = os.environ.get('MODULEPATH', '')
    self.orig_modulepaths = mp.split(os.pathsep)
    self.testmods = None
    # purge while the original $MODULEPATH is still active; purging fails once the
    # module path of a loaded module is no longer part of $MODULEPATH
    modules_tool().purge()
def test_ictce_toolchain(self):
    """Test for ictce toolchain."""
    tmpdir, imkl_module_path, imkl_module_txt = self.setup_sandbox_for_intel_fftw()

    # expected compiler commands, without and with MPI wrappers
    serial_vars = ['CC', 'CXX', 'F77', 'F90', 'FC']
    serial_exp = {'CC': 'icc', 'CXX': 'icpc', 'F77': 'ifort', 'F90': 'ifort', 'FC': 'ifort'}
    mpi_vars = ['CC', 'CXX', 'F77', 'F90', 'FC', 'MPICC', 'MPICXX', 'MPIF77', 'MPIF90', 'MPIFC']
    mpi_exp = {
        'CC': 'mpicc', 'CXX': 'mpicxx', 'F77': 'mpif77', 'F90': 'mpif90', 'FC': 'mpif90',
        'MPICC': 'mpicc', 'MPICXX': 'mpicxx', 'MPIF77': 'mpif77', 'MPIF90': 'mpif90', 'MPIFC': 'mpif90',
    }

    # plain ictce: Intel compilers used directly
    tc = self.get_toolchain("ictce", version="4.1.13")
    tc.prepare()
    for var in serial_vars:
        self.assertEqual(tc.get_variable(var), serial_exp[var])
    modules.modules_tool().purge()

    # with 'usempi' enabled: MPI wrappers everywhere
    tc = self.get_toolchain("ictce", version="4.1.13")
    tc.set_options({'usempi': True})
    tc.prepare()
    for var in mpi_vars:
        self.assertEqual(tc.get_variable(var), mpi_exp[var])
    modules.modules_tool().purge()

    # 'usempi' + 'openmp': '-mt_mpi' in all flag variables, MPI wrappers still in use
    tc = self.get_toolchain("ictce", version="4.1.13")
    tc.set_options({'usempi': True, 'openmp': True})
    tc.prepare()
    for var in ['CFLAGS', 'CXXFLAGS', 'FCFLAGS', 'FFLAGS', 'F90FLAGS']:
        self.assertTrue('-mt_mpi' in tc.get_variable(var))
    for var in mpi_vars:
        self.assertEqual(tc.get_variable(var), mpi_exp[var])

    # cleanup
    shutil.rmtree(tmpdir)
    write_file(imkl_module_path, imkl_module_txt)
def setUp(self):
    """Set up everything for a unit test."""
    super(ToolchainTest, self).setUp()
    # make sure no modules are loaded when a test starts
    modules.modules_tool().purge()
    # stash the current $MODULEPATH value (restored in tearDown),
    # then swap in the test suite's modules directory
    current_modpath = os.environ.get('MODULEPATH', '')
    self.orig_modpath = current_modpath
    os.environ['MODULEPATH'] = find_full_path(os.path.join('test', 'framework', 'modules'))
def test_prepare_deps_external(self):
    """Test preparing for a toolchain when dependencies and external modules are involved."""
    deps = [
        {
            'name': 'OpenMPI',
            'version': '1.6.4',
            'full_mod_name': 'OpenMPI/1.6.4-GCC-4.6.4',
            'short_mod_name': 'OpenMPI/1.6.4-GCC-4.6.4',
            'external_module': False,
            'external_module_metadata': {},
        },
        # no metadata available
        {
            'name': None,
            'version': None,
            'full_mod_name': 'toy/0.0',
            'short_mod_name': 'toy/0.0',
            'external_module': True,
            'external_module_metadata': {},
        }
    ]
    tc = self.get_toolchain('GCC', version='4.6.4')
    tc.add_dependencies(deps)
    tc.prepare()
    mods = ['GCC/4.6.4', 'hwloc/1.6.2-GCC-4.6.4', 'OpenMPI/1.6.4-GCC-4.6.4', 'toy/0.0']
    # bug fix: assertTrue(x, mods) treated 'mods' as a failure *message* and compared
    # nothing; assertEqual actually verifies the list of loaded modules
    self.assertEqual([m['mod_name'] for m in modules_tool().list()], mods)
    self.assertTrue(os.environ['EBROOTTOY'].endswith('software/toy/0.0'))
    self.assertEqual(os.environ['EBVERSIONTOY'], '0.0')
    self.assertFalse('EBROOTFOOBAR' in os.environ)

    # with metadata: root/version env vars are derived from the metadata + prefix variable
    deps[1] = {
        'full_mod_name': 'toy/0.0',
        'short_mod_name': 'toy/0.0',
        'external_module': True,
        'external_module_metadata': {
            'name': ['toy', 'foobar'],
            'version': ['1.2.3', '4.5'],
            'prefix': 'FOOBAR_PREFIX',
        }
    }
    tc = self.get_toolchain('GCC', version='4.6.4')
    tc.add_dependencies(deps)
    os.environ['FOOBAR_PREFIX'] = '/foo/bar'
    tc.prepare()
    # same list of loaded modules is expected in both scenarios
    self.assertEqual([m['mod_name'] for m in modules_tool().list()], mods)
    self.assertEqual(os.environ['EBROOTTOY'], '/foo/bar')
    self.assertEqual(os.environ['EBVERSIONTOY'], '1.2.3')
    self.assertEqual(os.environ['EBROOTFOOBAR'], '/foo/bar')
    self.assertEqual(os.environ['EBVERSIONFOOBAR'], '4.5')
    self.assertEqual(modules.get_software_root('foobar'), '/foo/bar')
    self.assertEqual(modules.get_software_version('toy'), '1.2.3')
def test_modules_tool_stateless(self):
    """Check whether ModulesTool instance is stateless between runs."""
    test_modules_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'modules')

    # copy test Core/Compiler modules, we need to rewrite the 'module use' statement in the one we're going to load
    shutil.copytree(os.path.join(test_modules_path, 'Core'), os.path.join(self.test_prefix, 'Core'))
    shutil.copytree(os.path.join(test_modules_path, 'Compiler'), os.path.join(self.test_prefix, 'Compiler'))

    modtxt = read_file(os.path.join(self.test_prefix, 'Core', 'GCC', '4.7.2'))
    modpath_extension = os.path.join(self.test_prefix, 'Compiler', 'GCC', '4.7.2')
    # bug fix: re.M was being passed as re.sub's 4th *positional* argument, i.e. 'count'
    # (re.M == 8, so at most 8 replacements) rather than as a regex flag;
    # pass it via 'flags=' so it is interpreted as intended
    modtxt = re.sub('module use .*', 'module use %s' % modpath_extension, modtxt, flags=re.M)
    write_file(os.path.join(self.test_prefix, 'Core', 'GCC', '4.7.2'), modtxt)

    modtxt = read_file(os.path.join(self.test_prefix, 'Compiler', 'GCC', '4.7.2', 'OpenMPI', '1.6.4'))
    modpath_extension = os.path.join(self.test_prefix, 'MPI', 'GCC', '4.7.2', 'OpenMPI', '1.6.4')
    mkdir(modpath_extension, parents=True)
    modtxt = re.sub('module use .*', 'module use %s' % modpath_extension, modtxt, flags=re.M)
    write_file(os.path.join(self.test_prefix, 'Compiler', 'GCC', '4.7.2', 'OpenMPI', '1.6.4'), modtxt)

    # force reset of any singletons by reinitiating config
    init_config()

    os.environ['MODULEPATH'] = os.path.join(self.test_prefix, 'Core')
    modtool = modules_tool()

    # error message differs between modules tools
    if isinstance(modtool, Lmod):
        load_err_msg = "cannot[\s\n]*be[\s\n]*loaded"
    else:
        load_err_msg = "Unable to locate a modulefile"

    # GCC/4.6.3 is *not* an available Core module
    self.assertErrorRegex(EasyBuildError, load_err_msg, modtool.load, ['GCC/4.6.3'])

    # GCC/4.7.2 is one of the available Core modules
    modtool.load(['GCC/4.7.2'])

    # OpenMPI/1.6.4 becomes available after loading GCC/4.7.2 module
    modtool.load(['OpenMPI/1.6.4'])
    modtool.purge()

    # reset $MODULEPATH, obtain new ModulesTool instance,
    # which should not remember anything w.r.t. previous $MODULEPATH value
    os.environ['MODULEPATH'] = test_modules_path
    modtool = modules_tool()

    # GCC/4.6.3 is available
    modtool.load(['GCC/4.6.3'])
    modtool.purge()

    # GCC/4.7.2 is available (note: also as non-Core module outside of hierarchy)
    modtool.load(['GCC/4.7.2'])

    # OpenMPI/1.6.4 is *not* available with current $MODULEPATH (loaded GCC/4.7.2 was not a hierarchical module)
    self.assertErrorRegex(EasyBuildError, load_err_msg, modtool.load, ['OpenMPI/1.6.4'])
def setUp(self):
    """Set up everything for a unit test."""
    # config.get_modules_tool only works once the configuration is initialized
    go = eboptions.parse_options()
    config.init(go.options, go.get_options_by_section('config'))
    # begin with a clean slate: purge all loaded modules
    modules.modules_tool().purge()
    # keep the original $MODULEPATH around, then swap in the test modules directory
    self.orig_modpath = os.environ.get('MODULEPATH', '')
    os.environ['MODULEPATH'] = find_full_path(os.path.join('test', 'framework', 'modules'))
def setUp(self):
    """Testcase setup."""
    super(ModulesToolTest, self).setUp()
    # keep track of original $MODULEPATH, so we can restore it
    self.orig_modulepaths = os.environ.get('MODULEPATH', '').split(os.pathsep)
    # purge while the original $MODULEPATH is still active; purging breaks once the
    # path of a loaded module is no longer part of $MODULEPATH
    modules_tool().purge()
    # remember any existing 'module' function definition, for restore in tearDown
    self.orig_module = os.environ.get('module', None)
def setUp(self):
    """Set up everything for a unit test."""
    # config.get_modules_tool requires an initialized configuration
    opts = eboptions.parse_options()
    config.init(opts.options, opts.get_options_by_section('config'))
    # start each test without any loaded modules
    modules.modules_tool().purge()
    # remember original $MODULEPATH, then point it at the test modules
    self.orig_modpath = os.environ.get('MODULEPATH', '')
    test_mods = os.path.join('test', 'framework', 'modules')
    os.environ['MODULEPATH'] = find_full_path(test_mods)
def tearDown(self):
    """Testcase cleanup."""
    super(ModulesToolTest, self).tearDown()
    os.environ['MODULEPATH'] = os.pathsep.join(self.orig_modulepaths)
    # a fresh modules tool instance triggers 'module use' on the restored paths
    modules_tool()
    # restore the original 'module' function definition, or remove a leftover one
    if self.orig_module is None:
        os.environ.pop('module', None)
    else:
        os.environ['module'] = self.orig_module
def setUp(self):
    """set up everything for a unit test."""
    # snapshot the environment so it can be restored afterwards
    self.orig_environ = copy.deepcopy(os.environ)
    # configuration must be initialized for config.get_modules_tool to work
    parsed = eboptions.parse_options()
    config.init(parsed.options, parsed.get_options_by_section('config'))
    # remember $MODULEPATH entries, then purge while they are still in effect;
    # purging fails if the path of a loaded module already left $MODULEPATH
    self.orig_modulepaths = os.environ.get('MODULEPATH', '').split(os.pathsep)
    modules_tool().purge()
def __init__(self, name=None, version=None, mns=None):
    """
    Toolchain constructor.

    :param name: toolchain name; falls back to the class-level NAME constant when not provided
    :param version: toolchain version; falls back to the class-level VERSION constant when not provided
    :param mns: module naming scheme instance used to derive module names (may be None)
    """
    self.base_init()
    self.dependencies = []
    self.toolchain_dep_mods = []
    # resolve name/version, falling back to class constants; both are mandatory
    if name is None:
        name = self.NAME
    if name is None:
        self.log.raiseException("init: no name provided")
    self.name = name
    if version is None:
        version = self.VERSION
    if version is None:
        self.log.raiseException("init: no version provided")
    self.version = version
    self.vars = None
    self.modules_tool = modules_tool()
    self.mns = mns
    # module name/path attributes; only filled in below for non-dummy toolchains with an MNS
    self.mod_full_name = None
    self.mod_short_name = None
    self.init_modpaths = None
    if self.name != DUMMY_TOOLCHAIN_NAME:
        # sometimes no module naming scheme class instance can/will be provided, e.g. with --list-toolchains
        if self.mns is not None:
            tc_dict = self.as_dict()
            self.mod_full_name = self.mns.det_full_module_name(tc_dict)
            self.mod_short_name = self.mns.det_short_module_name(tc_dict)
            self.init_modpaths = self.mns.det_init_modulepaths(tc_dict)
def load_module(self, mod_name, recursive_unload=False, depends_on=False, unload_modules=None):
    """
    Generate load statement for specified module.

    :param mod_name: name of module to generate load statement for
    :param recursive_unload: boolean indicating whether the 'load' statement should be reverted on unload
    :param depends_on: use a depends-on statement rather than a plain load (requires modules tool support)
    :param unload_modules: name(s) of modules to unload first
    """
    body = []
    if unload_modules:
        body.extend([self.unload_module(m).strip() for m in unload_modules])
    load_template = self.LOAD_TEMPLATE
    # Lmod 7.6.1+ supports depends-on which does this most nicely:
    if build_option('mod_depends_on') or depends_on:
        if not modules_tool().supports_depends_on:
            raise EasyBuildError("depends-on statements in generated module are not supported by modules tool")
        load_template = self.LOAD_TEMPLATE_DEPENDS_ON
    body.append(load_template)

    if build_option('recursive_mod_unload') or recursive_unload or load_template == self.LOAD_TEMPLATE_DEPENDS_ON:
        # not wrapping the 'module load' with an is-loaded guard ensures recursive unloading;
        # when "module unload" is called on the module in which the dependency "module load" is present,
        # it will get translated to "module unload"
        load_statement = body + ['']
    else:
        load_statement = [self.conditional_statement("is-loaded %(mod_name)s", '\n'.join(body), negative=True)]
    # mod_name is interpolated into the assembled (template) text in one go
    return '\n'.join([''] + load_statement) % {'mod_name': mod_name}
def test_modulefile_path(self):
    """Test modulefile_path method"""
    test_dir = os.path.abspath(os.path.dirname(__file__))
    gcc_mod_file = os.path.join(test_dir, 'modules', 'GCC', '4.7.2')
    modtool = modules_tool()
    res = modtool.modulefile_path('GCC/4.7.2')
    self.assertTrue(os.path.samefile(res, gcc_mod_file))

    # Lmod-specific checks: hidden Lua module files, with and without extension stripping
    if isinstance(self.modtool, Lmod):
        res = modtool.modulefile_path('bzip2/.1.0.6')
        self.assertTrue(os.path.samefile(res, os.path.join(test_dir, 'modules', 'bzip2', '.1.0.6.lua')))
        res = modtool.modulefile_path('bzip2/.1.0.6', strip_ext=True)
        self.assertTrue(res.endswith('test/framework/modules/bzip2/.1.0.6'))

    # hack into 'module show GCC/4.7.2' cache and inject alternate output that modulecmd.tcl sometimes produces
    # make sure we only extract the module file path, nothing else...
    # cfr. https://github.com/easybuilders/easybuild/issues/368
    modulepath = os.environ['MODULEPATH'].split(':')
    mod_show_cache_key = modtool.mk_module_cache_key('GCC/4.7.2')
    mod.MODULE_SHOW_CACHE[mod_show_cache_key] = '\n'.join([
        "import os",
        "os.environ['MODULEPATH_modshare'] = '%s'" % ':'.join(m + ':1' for m in modulepath),
        "os.environ['MODULEPATH'] = '%s'" % ':'.join(modulepath),
        "------------------------------------------------------------------------------",
        "%s:" % gcc_mod_file,
        "------------------------------------------------------------------------------",
        # remainder of output doesn't really matter in this context
        "setenv EBROOTGCC /prefix/GCC/4.7.2"
    ])
    # even with the noisy injected output, only the module file path must be extracted
    res = modtool.modulefile_path('GCC/4.7.2')
    self.assertTrue(os.path.samefile(res, os.path.join(test_dir, 'modules', 'GCC', '4.7.2')))

    # leave no poisoned caches behind for other tests
    reset_module_caches()
def find_resolved_modules(unprocessed, avail_modules, retain_all_deps=False):
    """
    Find easyconfigs in 1st argument which can be fully resolved using modules specified in 2nd argument

    :param unprocessed: list of easyconfig dicts that still have unresolved dependencies
    :param avail_modules: list of full module names considered available
    :param retain_all_deps: when True, only availability in avail_modules counts
                            (the hidden-module existence check is skipped)
    :return: tuple of (resolvable easyconfigs, still-unresolvable easyconfigs, updated available modules)
    """
    ordered_ecs = []
    new_avail_modules = avail_modules[:]
    new_unprocessed = []
    modtool = modules_tool()
    for ec in unprocessed:
        # work on a copy so the caller's easyconfig dict is not mutated
        new_ec = ec.copy()
        deps = []
        for dep in new_ec['dependencies']:
            full_mod_name = ActiveMNS().det_full_module_name(dep)
            dep_resolved = full_mod_name in new_avail_modules
            if not retain_all_deps:
                # hidden modules need special care, since they may not be included in list of available modules
                dep_resolved |= dep['hidden'] and modtool.exist(
                    [full_mod_name])[0]
            if not dep_resolved:
                # keep only the dependencies that could not be resolved (yet)
                deps.append(dep)
        new_ec['dependencies'] = deps
        if len(new_ec['dependencies']) == 0:
            _log.debug("Adding easyconfig %s to final list" % new_ec['spec'])
            ordered_ecs.append(new_ec)
            # the module of a fully resolved easyconfig becomes available for later entries
            new_avail_modules.append(ec['full_mod_name'])
        else:
            new_unprocessed.append(new_ec)
    return ordered_ecs, new_unprocessed, new_avail_modules
def find_resolved_modules(unprocessed, avail_modules, retain_all_deps=False):
    """
    Find easyconfigs in 1st argument which can be fully resolved using modules specified in 2nd argument
    """
    ordered_ecs = []
    new_avail_modules = avail_modules[:]
    new_unprocessed = []
    modtool = modules_tool()

    for ec in unprocessed:
        # work on a copy, so the caller's easyconfig dict stays untouched
        new_ec = ec.copy()

        unresolved_deps = []
        for dep in new_ec['dependencies']:
            full_mod_name = ActiveMNS().det_full_module_name(dep)
            resolved = full_mod_name in new_avail_modules
            if not retain_all_deps:
                # hidden modules need special care, since they may not be included in list of available modules
                hidden_and_exists = dep['hidden'] and modtool.exist([full_mod_name])[0]
                resolved = resolved or hidden_and_exists
            if not resolved:
                unresolved_deps.append(dep)
        new_ec['dependencies'] = unresolved_deps

        if new_ec['dependencies']:
            new_unprocessed.append(new_ec)
        else:
            _log.debug("Adding easyconfig %s to final list" % new_ec['spec'])
            ordered_ecs.append(new_ec)
            # resolved easyconfig's module becomes available for subsequent entries
            new_avail_modules.append(ec['full_mod_name'])

    return ordered_ecs, new_unprocessed, new_avail_modules
def setUp(self):
    """set up everything for a unit test."""
    # snapshot environment for later restore
    self.orig_environ = copy.deepcopy(os.environ)
    # initialize configuration so config.get_modules_tool function works
    parsed = eboptions.parse_options()
    config.init(parsed.options, parsed.get_options_by_section('config'))
    self.cwd = os.getcwd()
    # remember $MODULEPATH entries for restore in tearDown
    self.orig_modulepaths = os.environ.get('MODULEPATH', '').split(os.pathsep)
    self.testmods = None
    # purge with original $MODULEPATH before each test; purging fails when the
    # module path of a loaded module is no longer part of $MODULEPATH
    modules_tool().purge()
def setUp(self):
    """Set up testcase."""
    super(EnhancedTestCase, self).setUp()

    # fresh logger and log file per test
    self.log = fancylogger.getLogger(self.__class__.__name__, fname=False)
    fd, self.logfile = tempfile.mkstemp(suffix='.log', prefix='eb-test-')
    os.close(fd)
    self.cwd = os.getcwd()
    self.test_prefix = tempfile.mkdtemp()

    # keep track of original environment to restore
    self.orig_environ = copy.deepcopy(os.environ)

    # keep track of original environment/Python search path to restore
    self.orig_sys_path = sys.path[:]

    # remember original EASYBUILD_* path settings (None if unset) so they can be restored
    self.orig_paths = {}
    for path in ['buildpath', 'installpath', 'sourcepath']:
        self.orig_paths[path] = os.environ.get('EASYBUILD_%s' % path.upper(), None)

    testdir = os.path.dirname(os.path.abspath(__file__))

    # point all EasyBuild paths at test-local/temporary locations
    self.test_sourcepath = os.path.join(testdir, 'sandbox', 'sources')
    os.environ['EASYBUILD_SOURCEPATH'] = self.test_sourcepath
    os.environ['EASYBUILD_PREFIX'] = self.test_prefix
    self.test_buildpath = tempfile.mkdtemp()
    os.environ['EASYBUILD_BUILDPATH'] = self.test_buildpath
    self.test_installpath = tempfile.mkdtemp()
    os.environ['EASYBUILD_INSTALLPATH'] = self.test_installpath

    # make sure that the tests only pick up easyconfigs provided with the tests
    os.environ['EASYBUILD_ROBOT_PATHS'] = os.path.join(testdir, 'easyconfigs')

    # make sure no deprecated behaviour is being triggered (unless intended by the test)
    # trip *all* log.deprecated statements by setting deprecation version ridiculously high
    self.orig_current_version = eb_build_log.CURRENT_VERSION
    os.environ['EASYBUILD_DEPRECATED'] = '10000000'

    init_config()

    # remove any entries in Python search path that seem to provide easyblocks
    for path in sys.path[:]:
        if os.path.exists(os.path.join(path, 'easybuild', 'easyblocks', '__init__.py')):
            sys.path.remove(path)

    # add test easyblocks to Python search path and (re)import and reload easybuild modules
    # NOTE: the import/reload order below is deliberate — do not reorder
    import easybuild
    sys.path.append(os.path.join(testdir, 'sandbox'))
    reload(easybuild)
    import easybuild.easyblocks
    reload(easybuild.easyblocks)
    import easybuild.easyblocks.generic
    reload(easybuild.easyblocks.generic)
    reload(easybuild.tools.module_naming_scheme)  # required to run options unit tests stand-alone

    modtool = modules_tool()
    self.reset_modulepath([os.path.join(testdir, 'modules')])
    # purge out any loaded modules with original $MODULEPATH before running each test
    modtool.purge()
def test_external_dependencies(self):
    """Test specifying external (build) dependencies."""
    topdir = os.path.dirname(os.path.abspath(__file__))
    ectxt = read_file(os.path.join(topdir, "easyconfigs", "toy-0.0-deps.eb"))
    toy_ec = os.path.join(self.test_prefix, "toy-0.0-external-deps.eb")

    # just specify some of the test modules we ship, doesn't matter where they come from
    extra_lines = [
        "dependencies += [('foobar/1.2.3', EXTERNAL_MODULE)]",
        "builddependencies = [('somebuilddep/0.1', EXTERNAL_MODULE)]",
        "versionsuffix = '-external-deps'",
    ]
    write_file(toy_ec, ectxt + '\n' + '\n'.join(extra_lines))

    # install dummy modules
    dummy_modpath = os.path.join(self.test_prefix, "modules")
    for mod in ["ictce/4.1.13", "GCC/4.7.2", "foobar/1.2.3", "somebuilddep/0.1"]:
        mkdir(os.path.join(dummy_modpath, os.path.dirname(mod)), parents=True)
        write_file(os.path.join(dummy_modpath, mod), "#%Module")
    self.reset_modulepath([dummy_modpath])

    self.test_toy_build(ec_file=toy_ec, versionsuffix="-external-deps", verbose=True)

    modules_tool().load(["toy/0.0-external-deps"])
    # note build dependency is not loaded
    expected_mods = ["ictce/4.1.13", "GCC/4.7.2", "foobar/1.2.3", "toy/0.0-external-deps"]
    self.assertEqual([entry["mod_name"] for entry in modules_tool().list()], expected_mods)

    # check behaviour when a non-existing external (build) dependency is included
    err_msg = "Missing modules for one or more dependencies marked as external modules:"

    extra_lines = [
        "builddependencies = [('nosuchbuilddep/0.0.0', EXTERNAL_MODULE)]",
        "versionsuffix = '-external-deps-broken1'",
    ]
    write_file(toy_ec, ectxt + '\n' + '\n'.join(extra_lines))
    self.assertErrorRegex(EasyBuildError, err_msg, self.test_toy_build, ec_file=toy_ec,
                          raise_error=True, verbose=False)

    extra_lines = [
        "dependencies += [('nosuchmodule/1.2.3', EXTERNAL_MODULE)]",
        "versionsuffix = '-external-deps-broken2'",
    ]
    write_file(toy_ec, ectxt + '\n' + '\n'.join(extra_lines))
    self.assertErrorRegex(EasyBuildError, err_msg, self.test_toy_build, ec_file=toy_ec,
                          raise_error=True, verbose=False)

    # --dry-run still works when external modules are missing; external modules are treated as if they were there
    outtxt = self.test_toy_build(ec_file=toy_ec, verbose=True, extra_args=["--dry-run"], verify=False)
    dry_run_regex = re.compile(r"^ \* \[ \] .* \(module: toy/0.0-external-deps-broken2\)", re.M)
    self.assertTrue(dry_run_regex.search(outtxt))
def init_testmods(self, test_modules_paths=None):
    """Initialize set of test modules for test."""
    # default to the 'modules' directory shipped alongside this test module
    if test_modules_paths is None:
        default_path = os.path.abspath(os.path.join(os.path.dirname(__file__), 'modules'))
        test_modules_paths = [default_path]
    self.testmods = modules_tool(test_modules_paths)
def setUp(self):
    """Set up testcase."""
    # fresh logger and log file per test
    self.log = fancylogger.getLogger(self.__class__.__name__, fname=False)
    fd, self.logfile = tempfile.mkstemp(suffix='.log', prefix='eb-test-')
    os.close(fd)
    self.cwd = os.getcwd()

    # keep track of original environment to restore
    self.orig_environ = copy.deepcopy(os.environ)

    # keep track of original environment/Python search path to restore
    self.orig_sys_path = sys.path[:]

    # remember original EASYBUILD_* path settings (None if unset) so they can be restored
    self.orig_paths = {}
    for path in ['buildpath', 'installpath', 'sourcepath']:
        self.orig_paths[path] = os.environ.get('EASYBUILD_%s' % path.upper(), None)

    testdir = os.path.dirname(os.path.abspath(__file__))

    # point all EasyBuild paths at test-local/temporary locations
    self.test_sourcepath = os.path.join(testdir, 'sandbox', 'sources')
    os.environ['EASYBUILD_SOURCEPATH'] = self.test_sourcepath
    self.test_buildpath = tempfile.mkdtemp()
    os.environ['EASYBUILD_BUILDPATH'] = self.test_buildpath
    self.test_installpath = tempfile.mkdtemp()
    os.environ['EASYBUILD_INSTALLPATH'] = self.test_installpath
    init_config()

    # add test easyblocks to Python search path and (re)import and reload easybuild modules
    # NOTE: the import/reload order below is deliberate — do not reorder
    import easybuild
    sys.path.append(os.path.join(testdir, 'sandbox'))
    reload(easybuild)
    import easybuild.easyblocks
    reload(easybuild.easyblocks)
    import easybuild.easyblocks.generic
    reload(easybuild.easyblocks.generic)
    reload(easybuild.tools.module_naming_scheme)  # required to run options unit tests stand-alone

    # set MODULEPATH to included test modules
    os.environ['MODULEPATH'] = os.path.join(testdir, 'modules')

    # purge out any loaded modules with original $MODULEPATH before running each test
    modules_tool().purge()
def test_misc_flags_unique_fortran(self):
    """Test whether unique Fortran compiler flags are set correctly."""
    fortran_flag_vars = ['FCFLAGS', 'FFLAGS', 'F90FLAGS']

    # setting option should result in corresponding flag to be set (Fortran unique options)
    for opt in ['i8', 'r8']:
        for enable in (True, False):
            tc = self.get_toolchain("goalf", version="1.1.0-no-OFED")
            tc.set_options({opt: enable})
            tc.prepare()
            flag = '-%s' % tc.COMPILER_UNIQUE_OPTION_MAP[opt]
            for var in fortran_flag_vars:
                flags = tc.get_variable(var)
                if enable:
                    self.assertTrue(flag in flags, "%s: True means %s in %s" % (opt, flag, flags))
                else:
                    self.assertTrue(flag not in flags, "%s: False means no %s in %s" % (opt, flag, flags))
            # start from a clean slate of loaded modules for the next iteration
            modules.modules_tool().purge()
def test_misc_flags_unique_fortran(self):
    """Test whether unique Fortran compiler flags are set correctly."""
    fortran_vars = ['FFLAGS', 'F90FLAGS']

    # setting option should result in corresponding flag to be set (Fortran unique options)
    for opt, enable in [(o, e) for o in ['i8', 'r8'] for e in [True, False]]:
        tc = self.get_toolchain("goalf", version="1.1.0-no-OFED")
        tc.set_options({opt: enable})
        tc.prepare()
        flag = '-%s' % tc.COMPILER_UNIQUE_OPTION_MAP[opt]
        for var in fortran_vars:
            flags = tc.get_variable(var)
            present = flag in flags
            if enable:
                self.assertTrue(present, "%s: True means %s in %s" % (opt, flag, flags))
            else:
                self.assertTrue(not present, "%s: False means no %s in %s" % (opt, flag, flags))
        # start from a clean slate of loaded modules for the next iteration
        modules.modules_tool().purge()
def test_external_dependencies(self):
    """Test specifying external (build) dependencies."""
    ectxt = read_file(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'easyconfigs', 'toy-0.0-deps.eb'))
    toy_ec = os.path.join(self.test_prefix, 'toy-0.0-external-deps.eb')

    # just specify some of the test modules we ship, doesn't matter where they come from
    extraectxt = "\ndependencies += [('foobar/1.2.3', EXTERNAL_MODULE)]"
    extraectxt += "\nbuilddependencies = [('somebuilddep/0.1', EXTERNAL_MODULE)]"
    extraectxt += "\nversionsuffix = '-external-deps'"
    write_file(toy_ec, ectxt + extraectxt)

    # install dummy modules to stand in for the external dependencies
    modulepath = os.path.join(self.test_prefix, 'modules')
    for mod in ['ictce/4.1.13', 'GCC/4.7.2', 'foobar/1.2.3', 'somebuilddep/0.1']:
        mkdir(os.path.join(modulepath, os.path.dirname(mod)), parents=True)
        write_file(os.path.join(modulepath, mod), "#%Module")
    self.reset_modulepath([modulepath])

    self.test_toy_build(ec_file=toy_ec, versionsuffix='-external-deps', verbose=True)

    # loading the built module should pull in its (runtime) dependencies
    modules_tool().load(['toy/0.0-external-deps'])
    # note build dependency is not loaded
    mods = ['ictce/4.1.13', 'GCC/4.7.2', 'foobar/1.2.3', 'toy/0.0-external-deps']
    self.assertEqual([x['mod_name'] for x in modules_tool().list()], mods)

    # check behaviour when a non-existing external (build) dependency is included
    err_msg = "Missing modules for one or more dependencies marked as external modules:"

    extraectxt = "\nbuilddependencies = [('nosuchbuilddep/0.0.0', EXTERNAL_MODULE)]"
    extraectxt += "\nversionsuffix = '-external-deps-broken1'"
    write_file(toy_ec, ectxt + extraectxt)
    self.assertErrorRegex(EasyBuildError, err_msg, self.test_toy_build, ec_file=toy_ec,
                          raise_error=True, verbose=False)

    extraectxt = "\ndependencies += [('nosuchmodule/1.2.3', EXTERNAL_MODULE)]"
    extraectxt += "\nversionsuffix = '-external-deps-broken2'"
    write_file(toy_ec, ectxt + extraectxt)
    self.assertErrorRegex(EasyBuildError, err_msg, self.test_toy_build, ec_file=toy_ec,
                          raise_error=True, verbose=False)

    # --dry-run still works when external modules are missing; external modules are treated as if they were there
    outtxt = self.test_toy_build(ec_file=toy_ec, verbose=True, extra_args=['--dry-run'], verify=False)
    self.assertTrue(re.search(r"^ \* \[ \] .* \(module: toy/0.0-external-deps-broken2\)", outtxt, re.M))
def find_resolved_modules(easyconfigs, avail_modules, retain_all_deps=False):
    """
    Find easyconfigs in 1st argument which can be fully resolved using modules specified in 2nd argument

    @param easyconfigs: list of parsed easyconfigs
    @param avail_modules: list of available modules
    @param retain_all_deps: retain all dependencies, regardless of whether modules are available for them or not
    @return: tuple of (resolved easyconfigs in order, easyconfigs with unresolved deps, updated available modules)
    """
    ordered_ecs = []
    new_easyconfigs = []
    modtool = modules_tool()
    # copy, we don't want to modify the origin list of available modules
    avail_modules = avail_modules[:]

    _log.debug("Finding resolved modules for %s (available modules: %s)", easyconfigs, avail_modules)
    for easyconfig in easyconfigs:
        new_ec = easyconfig.copy()
        deps = []
        for dep in new_ec['dependencies']:
            full_mod_name = dep.get('full_mod_name', ActiveMNS().det_full_module_name(dep))
            # NOTE: the order of the branches below matters — external deps must be considered before
            # the generic retain-all-deps handling
            # treat external modules as resolved when retain_all_deps is enabled (e.g., under --dry-run),
            # since no corresponding easyconfig can be found for them
            if retain_all_deps and dep.get('external_module', False):
                _log.debug("Treating dependency marked as external dependency as resolved: %s", dep)
            elif retain_all_deps and full_mod_name not in avail_modules:
                # if all dependencies should be retained, include dep unless it has been already
                _log.debug("Retaining new dep %s in 'retain all deps' mode", dep)
                deps.append(dep)
            elif not module_is_available(full_mod_name, modtool, avail_modules, dep['hidden']):
                # no module available (yet) => retain dependency as one to be resolved
                _log.debug("No module available for dep %s, retaining it", dep)
                deps.append(dep)

        # update list of dependencies with only those unresolved
        new_ec['dependencies'] = deps

        # if all dependencies have been resolved, add module for this easyconfig in the list of available modules
        if not new_ec['dependencies']:
            _log.debug("Adding easyconfig %s to final list" % new_ec['spec'])
            ordered_ecs.append(new_ec)
            avail_modules.append(easyconfig['full_mod_name'])
        else:
            new_easyconfigs.append(new_ec)

    return ordered_ecs, new_easyconfigs, avail_modules
def setUp(self):
    """Set up testcase."""
    # fresh logger and log file per test
    self.log = fancylogger.getLogger(self.__class__.__name__, fname=False)
    fd, self.logfile = tempfile.mkstemp(suffix='.log', prefix='eb-test-')
    os.close(fd)
    self.cwd = os.getcwd()

    # keep track of original environment to restore
    self.orig_environ = copy.deepcopy(os.environ)

    # keep track of original environment/Python search path to restore
    self.orig_sys_path = sys.path[:]

    # remember original EASYBUILD_* path settings (None if unset) so they can be restored
    self.orig_paths = {}
    for path in ['buildpath', 'installpath', 'sourcepath']:
        self.orig_paths[path] = os.environ.get('EASYBUILD_%s' % path.upper(), None)

    testdir = os.path.dirname(os.path.abspath(__file__))

    # point all EasyBuild paths at test-local/temporary locations
    self.test_sourcepath = os.path.join(testdir, 'sandbox', 'sources')
    os.environ['EASYBUILD_SOURCEPATH'] = self.test_sourcepath
    self.test_buildpath = tempfile.mkdtemp()
    os.environ['EASYBUILD_BUILDPATH'] = self.test_buildpath
    self.test_installpath = tempfile.mkdtemp()
    os.environ['EASYBUILD_INSTALLPATH'] = self.test_installpath
    init_config()

    # add test easyblocks to Python search path and (re)import and reload easybuild modules
    # NOTE: the import/reload order below is deliberate — do not reorder
    import easybuild
    sys.path.append(os.path.join(testdir, 'sandbox'))
    reload(easybuild)
    import easybuild.easyblocks
    reload(easybuild.easyblocks)
    import easybuild.easyblocks.generic
    reload(easybuild.easyblocks.generic)
    reload(easybuild.tools.module_naming_scheme)  # required to run options unit tests stand-alone

    # set MODULEPATH to included test modules
    os.environ['MODULEPATH'] = os.path.join(testdir, 'modules')

    # purge out any loaded modules with original $MODULEPATH before running each test
    modules_tool().purge()
def test_path_to_top_of_module_tree(self):
    """Test function to determine path to top of the module tree."""
    modtool = modules_tool()

    # with no initial module paths, the path to the top of the module tree is always empty
    cases = [
        ('gompi/1.3.12', ['GCC/4.6.4', 'OpenMPI/1.6.4-GCC-4.6.4']),
        ('toy/.0.0-deps', ['gompi/1.3.12']),
        ('toy/0.0', []),
    ]
    for mod_name, deps in cases:
        path = modtool.path_to_top_of_module_tree([], mod_name, '', deps)
        self.assertEqual(path, [])
def test_misc_flags_shared(self):
    """Test whether shared compiler flags are set correctly."""
    compiler_flag_vars = ['CFLAGS', 'CXXFLAGS', 'FFLAGS', 'F90FLAGS']

    # setting option should result in corresponding flag to be set (shared options)
    shared_opts = ['pic', 'verbose', 'debug', 'static', 'shared']
    for opt in shared_opts:
        for enable in (True, False):
            tc = self.get_toolchain("goalf", version="1.1.0-no-OFED")
            tc.set_options({opt: enable})
            tc.prepare()
            # we need to make sure we check for flags, not letter (e.g. 'v' vs '-v')
            flag = '-%s' % tc.COMPILER_SHARED_OPTION_MAP[opt]
            for var in compiler_flag_vars:
                flags = tc.get_variable(var)
                if enable:
                    self.assertTrue(flag in flags, "%s: True means %s in %s" % (opt, flag, flags))
                else:
                    self.assertTrue(flag not in flags, "%s: False means no %s in %s" % (opt, flag, flags))
            # start from a clean slate of loaded modules for the next iteration
            modules.modules_tool().purge()
def reset_modulepath(self, modpaths):
    """Reset $MODULEPATH with specified paths."""
    modtool = modules_tool()
    # unuse every path currently listed in $MODULEPATH
    for curr_path in os.environ.get('MODULEPATH', '').split(os.pathsep):
        modtool.remove_module_path(curr_path)
    # make very sure $MODULEPATH is totally empty
    # some paths may be left behind, e.g. when they contain environment variables
    # example: "module unuse Modules/$MODULE_VERSION/modulefiles" may not yield the desired result
    os.environ['MODULEPATH'] = ''
    # then use only the requested paths
    for new_path in modpaths:
        modtool.add_module_path(new_path)
def test_purge(self):
    """Test if purging of modules works."""
    testmods_path = os.path.join(os.path.dirname(__file__), 'modules')
    modtool = modules_tool([testmods_path])

    # load the first available module, so there is something to purge
    avail = modtool.available('', None)
    modtool.add_module([avail[0]])
    modtool.load()
    self.assertTrue(len(modtool.loaded_modules()) > 0)

    # after purging, no modules should be loaded anymore
    modtool.purge()
    self.assertTrue(len(modtool.loaded_modules()) == 0)
def test_misc_flags_shared(self):
    """Test whether shared compiler flags are set correctly."""
    # all compiler flag variables that should carry the shared option flags
    flag_vars = ['CFLAGS', 'CXXFLAGS', 'FCFLAGS', 'FFLAGS', 'F90FLAGS']

    # setting option should result in corresponding flag to be set (shared options)
    for opt in ['pic', 'verbose', 'debug', 'static', 'shared']:
        for enable in [True, False]:
            tc = self.get_toolchain("goalf", version="1.1.0-no-OFED")
            tc.set_options({opt: enable})
            tc.prepare()
            # we need to make sure we check for flags, not letter (e.g. 'v' vs '-v')
            flag = '-%s' % tc.COMPILER_SHARED_OPTION_MAP[opt]
            for var in flag_vars:
                flags = tc.get_variable(var)
                if enable:
                    self.assertTrue(flag in flags, "%s: True means %s in %s" % (opt, flag, flags))
                else:
                    self.assertTrue(flag not in flags, "%s: False means no %s in %s" % (opt, flag, flags))
            # purge loaded modules so the next iteration starts from a clean environment
            modules.modules_tool().purge()
def test_wrong_modulepath(self):
    """Test whether modules tool can deal with a broken $MODULEPATH."""
    test_modules_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'modules')
    modules_test_installpath = os.path.join(self.test_installpath, 'modules', 'all')

    # inject two non-existing entries in front of the valid test modules path
    bogus_entries = ['/some/non-existing/path', '/this/doesnt/exists/anywhere', test_modules_path]
    os.environ['MODULEPATH'] = ':'.join(bogus_entries)
    init_config()

    modtool = modules_tool()
    # the non-existing entries should have been filtered out, leaving only valid paths
    self.assertEqual(len(modtool.mod_paths), 2)
    self.assertTrue(os.path.samefile(modtool.mod_paths[0], modules_test_installpath))
    self.assertEqual(modtool.mod_paths[1], test_modules_path)
    self.assertTrue(len(modtool.available()) > 0)