def setup_user(self, msg=None):
    partition = (fixtures.partition_with_scheduler(self.sched_name) or
                 fixtures.partition_with_scheduler('slurm'))
    if partition is None:
        self.skipTest('SLURM not configured')

    self.testjob.options += partition.access
def setup_user(self, msg=None):
    '''Configure the test for running with the user-supplied job scheduler
    configuration or skip it.
    '''
    partition = fixtures.partition_with_scheduler(self.sched_name)
    if partition is None:
        msg = msg or "scheduler '%s' not configured" % self.sched_name
        self.skipTest(msg)

    self.testjob.options += partition.access
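# The snippets above all depend on the partition_with_scheduler() helper from
# the unittests' fixtures module. As a rough sketch of what such a helper
# could look like, assuming the auto-detected system is exposed as
# fixtures.HOST and that each partition knows its scheduler's registered name
# (the attribute names here are illustrative, not the actual fixtures API):
def partition_with_scheduler(name=None):
    '''Return the first partition of the current system whose scheduler is
    registered under `name`; if `name` is None, return the first partition
    with a non-local scheduler. Return None if no partition matches.'''
    for part in HOST.partitions:
        sched_name = part.scheduler.registered_name  # illustrative attribute
        if name is None and sched_name != 'local':
            return part

        if sched_name == name:
            return part

    return None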
def setup_remote_execution(self):
    self.partition = fixtures.partition_with_scheduler()
    if self.partition is None:
        self.skipTest('job submission not supported')

    try:
        self.progenv = self.partition.environs[0]
    except IndexError:
        self.skipTest('no environments configured for partition: %s' %
                      self.partition.fullname)
def _setup_remote_execution(scheduler=None):
    partition = fixtures.partition_with_scheduler(scheduler)
    if partition is None:
        pytest.skip('job submission not supported')

    try:
        environ = partition.environs[0]
    except IndexError:
        pytest.skip('no environments configured for partition: %s' %
                    partition.fullname)

    return partition, environ
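# A pytest-style test would use the helper above as follows (hypothetical
# test body; the check configuration is elided):
def test_hellocheck_remote():
    partition, environ = _setup_remote_execution()
    # ...configure the check with `partition` and `environ`, then run it;
    # pytest.skip() inside the helper aborts the test cleanly when no
    # remote scheduler is available.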
def test_check_submit_success(self):
    # This test will run on the auto-detected system
    system = fixtures.HOST
    partition = fixtures.partition_with_scheduler(None)
    init_modules_system(system.modules_system)
    self.local = False
    self.system = partition.fullname

    # pick up the programming environment of the partition
    self.environs = [partition.environs[0].name]

    returncode, stdout, _ = self._run_reframe()
    self.assertNotIn('FAILED', stdout)
    self.assertIn('PASSED', stdout)
    self.assertEqual(0, returncode)
def test_check_submit_success(self):
    # This test will run on the auto-detected system
    partition = fixtures.partition_with_scheduler()
    if not partition:
        self.skipTest('job submission not supported')

    self.config_file = fixtures.USER_CONFIG_FILE
    self.local = False
    self.system = partition.fullname

    # pick up the programming environment of the partition
    self.environs = [partition.environs[0].name]

    returncode, stdout, _ = self._run_reframe()
    self.assertNotIn('FAILED', stdout)
    self.assertIn('PASSED', stdout)
    self.assertEqual(0, returncode)
def test_check_submit_success(self):
    # This test will run on the auto-detected system
    partition = fixtures.partition_with_scheduler()
    if not partition:
        self.skipTest('job submission not supported')

    self.config_file = fixtures.USER_CONFIG_FILE
    self.local = False
    self.system = partition.fullname

    # Pick up the programming environment of the partition.
    # Prepend ^ and append $ so as to match exactly the given name
    self.environs = ['^' + partition.environs[0].name + '$']

    returncode, stdout, _ = self._run_reframe()
    self.assertNotIn('FAILED', stdout)
    self.assertIn('PASSED', stdout)

    # Assert that we have run only one test case
    self.assertIn('Ran 1 test case(s)', stdout)
    self.assertEqual(0, returncode)
def test_check_submit_success(self):
    # This test will run on the auto-detected system
    system = fixtures.HOST
    partition = fixtures.partition_with_scheduler(None)
    init_modules_system(system.modules_system)
    self.local = False
    self.system = partition.fullname

    # Use the system config file here
    #
    # FIXME: This whole thing is quite hacky; we definitely need to
    # redesign the fixtures. It is also not equivalent to the previous
    # version, which monkey-patched the logging settings.
    self.config_file = os.getenv('RFM_CONFIG_FILE', 'reframe/settings.py')
    self.delete_config_file = False

    # pick up the programming environment of the partition
    self.environs = [partition.environs[0].name]

    returncode, stdout, _ = self._run_reframe()
    self.assertNotIn('FAILED', stdout)
    self.assertIn('PASSED', stdout)
    self.assertEqual(0, returncode)
def sched_configured(self):
    return fixtures.partition_with_scheduler('pbs') is not None
def setup_from_sysconfig(self):
    partition = partition_with_scheduler('slurm')
    self.testjob.options += partition.access
class TestSlurmJob(_TestJob):
    @property
    def job_type(self):
        return getscheduler('slurm')

    @property
    def launcher(self):
        return LocalLauncher()

    def setup_from_sysconfig(self):
        partition = partition_with_scheduler('slurm')
        self.testjob.options += partition.access

    def test_prepare(self):
        # Mock up a job submission
        self.testjob._time_limit = (0, 5, 0)
        self.testjob._num_tasks = 16
        self.testjob._num_tasks_per_node = 2
        self.testjob._num_tasks_per_core = 1
        self.testjob._num_tasks_per_socket = 1
        self.testjob._num_cpus_per_task = 18
        self.testjob._use_smt = True
        self.testjob._sched_nodelist = 'nid000[00-17]'
        self.testjob._sched_exclude_nodelist = 'nid00016'
        self.testjob._sched_partition = 'foo'
        self.testjob._sched_reservation = 'bar'
        self.testjob._sched_account = 'spam'
        self.testjob._sched_exclusive_access = True
        self.testjob.options = ['--gres=gpu:4',
                                '#DW jobdw capacity=100GB',
                                '#DW stage_in source=/foo']
        super().test_prepare()
        expected_directives = set([
            '#SBATCH --job-name="rfm_testjob"',
            '#SBATCH --time=0:5:0',
            '#SBATCH --output=%s' % self.testjob.stdout,
            '#SBATCH --error=%s' % self.testjob.stderr,
            '#SBATCH --ntasks=%s' % self.testjob.num_tasks,
            '#SBATCH --ntasks-per-node=%s' % self.testjob.num_tasks_per_node,
            '#SBATCH --ntasks-per-core=%s' % self.testjob.num_tasks_per_core,
            ('#SBATCH --ntasks-per-socket=%s' %
             self.testjob.num_tasks_per_socket),
            '#SBATCH --cpus-per-task=%s' % self.testjob.num_cpus_per_task,
            '#SBATCH --hint=multithread',
            '#SBATCH --nodelist=%s' % self.testjob.sched_nodelist,
            '#SBATCH --exclude=%s' % self.testjob.sched_exclude_nodelist,
            '#SBATCH --partition=%s' % self.testjob.sched_partition,
            '#SBATCH --reservation=%s' % self.testjob.sched_reservation,
            '#SBATCH --account=%s' % self.testjob.sched_account,
            '#SBATCH --exclusive',
            # Custom options and directives
            '#SBATCH --gres=gpu:4',
            '#DW jobdw capacity=100GB',
            '#DW stage_in source=/foo'
        ])
        with open(self.testjob.script_filename) as fp:
            found_directives = set(re.findall(r'^\#\w+ .*', fp.read(),
                                              re.MULTILINE))

        self.assertEqual(expected_directives, found_directives)

    def test_prepare_no_exclusive(self):
        self.testjob._sched_exclusive_access = False
        super().test_prepare()
        with open(self.testjob.script_filename) as fp:
            self.assertIsNone(re.search(r'--exclusive', fp.read()))

    def test_prepare_no_smt(self):
        self.testjob._use_smt = None
        super().test_prepare()
        with open(self.testjob.script_filename) as fp:
            self.assertIsNone(re.search(r'--hint', fp.read()))

    def test_prepare_with_smt(self):
        self.testjob._use_smt = True
        super().test_prepare()
        with open(self.testjob.script_filename) as fp:
            self.assertIsNotNone(re.search(r'--hint=multithread',
                                           fp.read()))

    def test_prepare_without_smt(self):
        self.testjob._use_smt = False
        super().test_prepare()
        with open(self.testjob.script_filename) as fp:
            self.assertIsNotNone(re.search(r'--hint=nomultithread',
                                           fp.read()))

    @unittest.skipIf(not partition_with_scheduler('slurm'),
                     'Slurm scheduler not supported')
    def test_submit(self):
        self.setup_from_sysconfig()
        super().test_submit()

    @unittest.skipIf(not partition_with_scheduler('slurm'),
                     'Slurm scheduler not supported')
    def test_submit_timelimit(self):
        # Skip this test for Slurm, since the minimum time limit is 1min
        self.skipTest("Slurm's minimum time limit is 60s")

    @unittest.skipIf(not partition_with_scheduler('slurm'),
                     'Slurm scheduler not supported')
    def test_cancel(self):
        from reframe.core.schedulers.slurm import SLURM_JOB_CANCELLED

        self.setup_from_sysconfig()
        super().test_cancel()
        self.assertEqual(self.testjob.state, SLURM_JOB_CANCELLED)
@unittest.skipIf(not partition_with_scheduler('slurm'),
                 'Slurm scheduler not supported')
def test_poll(self):
    self.setup_from_sysconfig()
    super().test_poll()
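# Note on the directives asserted in test_prepare() above: plain options
# such as '--gres=gpu:4' are emitted with an '#SBATCH ' prefix, whereas
# fully-formed directives such as the DataWarp '#DW jobdw ...' lines are
# passed through to the job script verbatim, which is why they appear
# unprefixed in expected_directives.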
class TestFrontend(unittest.TestCase):
    @property
    def argv(self):
        ret = ['./bin/reframe', '--prefix', self.prefix, '--nocolor']
        if self.mode:
            ret += ['--mode', self.mode]

        if self.system:
            ret += ['--system', self.system]

        if self.config_file:
            ret += ['-C', self.config_file]

        ret += itertools.chain(*(['-c', c] for c in self.checkpath))
        ret += itertools.chain(*(['-p', e] for e in self.environs))
        if self.local:
            ret += ['--force-local']

        if self.action == 'run':
            ret += ['-r']
        elif self.action == 'list':
            ret += ['-l']
        elif self.action == 'help':
            ret += ['-h']

        if self.ignore_check_conflicts:
            ret += ['--ignore-check-conflicts']

        ret += self.more_options
        return ret

    def setUp(self):
        self.prefix = tempfile.mkdtemp(dir='unittests')
        self.config_file = 'custom_settings.py'
        self.system = 'generic:login'
        self.checkpath = ['unittests/resources/hellocheck.py']
        self.environs = ['builtin-gcc']
        self.local = True
        self.action = 'run'
        self.more_options = []
        self.mode = None
        self.config_file, subst = fixtures.generate_test_config()
        self.logfile = subst['logfile']
        self.delete_config_file = True
        self.ignore_check_conflicts = True

    def tearDown(self):
        shutil.rmtree(self.prefix)
        os.remove(self.logfile)
        if self.delete_config_file:
            os.remove(self.config_file)

    def _run_reframe(self):
        import reframe.frontend.cli as cli
        return run_command_inline(self.argv, cli.main)

    def _stage_exists(self, check_name, partitions, environs):
        stagedir = os.path.join(self.prefix, 'stage')
        for p in partitions:
            for e in environs:
                path = os.path.join(stagedir, p, check_name, e)
                if not os.path.exists(path):
                    return False

        return True

    def _perflog_exists(self, check_name, partitions):
        logdir = os.path.join(self.prefix, 'logs')
        for p in partitions:
            logfile = os.path.join(logdir, p, check_name + '.log')
            if not os.path.exists(logfile):
                return False

        return True

    def assert_log_file_is_saved(self):
        outputdir = os.path.join(self.prefix, 'output')
        self.assertTrue(os.path.exists(self.logfile))
        self.assertTrue(os.path.exists(
            os.path.join(outputdir, os.path.basename(self.logfile))))

    def test_check_success(self):
        self.more_options = ['--save-log-files']
        returncode, stdout, _ = self._run_reframe()
        self.assertNotIn('FAILED', stdout)
        self.assertIn('PASSED', stdout)
        self.assertEqual(0, returncode)
        self.assert_log_file_is_saved()

    @unittest.skipIf(not fixtures.partition_with_scheduler(None),
                     'job submission not supported')
    def test_check_submit_success(self):
        # This test will run on the auto-detected system
        system = fixtures.HOST
        partition = fixtures.partition_with_scheduler(None)
        init_modules_system(system.modules_system)
        self.local = False
        self.system = partition.fullname

        # Use the system config file here
        #
        # FIXME: This whole thing is quite hacky; we definitely need to
        # redesign the fixtures. It is also not equivalent to the previous
        # version, which monkey-patched the logging settings.
        self.config_file = os.getenv('RFM_CONFIG_FILE',
                                     'reframe/settings.py')
        self.delete_config_file = False

        # pick up the programming environment of the partition
        self.environs = [partition.environs[0].name]

        returncode, stdout, _ = self._run_reframe()
        self.assertNotIn('FAILED', stdout)
        self.assertIn('PASSED', stdout)
        self.assertEqual(0, returncode)

    def test_check_failure(self):
        self.checkpath = ['unittests/resources/frontend_checks.py']
        self.more_options = ['-t', 'BadSetupCheck']
        returncode, stdout, _ = self._run_reframe()
        self.assertIn('FAILED', stdout)
        self.assertNotEqual(returncode, 0)

    def test_check_setup_failure(self):
        self.checkpath = ['unittests/resources/frontend_checks.py']
        self.more_options = ['-t', 'BadSetupCheckEarly']
        self.local = False
        returncode, stdout, stderr = self._run_reframe()
        self.assertNotIn('Traceback', stdout)
        self.assertNotIn('Traceback', stderr)
        self.assertIn('FAILED', stdout)
        self.assertNotEqual(returncode, 0)

    def test_check_kbd_interrupt(self):
        self.checkpath = ['unittests/resources/frontend_checks.py']
        self.more_options = ['-t', 'KeyboardInterruptCheck']
        self.local = False
        returncode, stdout, stderr = self._run_reframe()
        self.assertNotIn('Traceback', stdout)
        self.assertNotIn('Traceback', stderr)
        self.assertIn('FAILED', stdout)
        self.assertNotEqual(returncode, 0)

    def test_check_sanity_failure(self):
        self.checkpath = ['unittests/resources/frontend_checks.py']
        self.more_options = ['-t', 'SanityFailureCheck']
        returncode, stdout, stderr = self._run_reframe()
        self.assertIn('FAILED', stdout)

        # This is a normal failure, it should not raise any exception
        self.assertNotIn('Traceback', stdout)
        self.assertNotIn('Traceback', stderr)
        self.assertNotEqual(returncode, 0)
        self.assertTrue(self._stage_exists('SanityFailureCheck',
                                           ['login'], self.environs))

    def test_performance_check_failure(self):
        self.checkpath = ['unittests/resources/frontend_checks.py']
        self.more_options = ['-t', 'PerformanceFailureCheck']
        returncode, stdout, stderr = self._run_reframe()
        self.assertIn('FAILED', stdout)

        # This is a normal failure, it should not raise any exception
        self.assertNotIn('Traceback', stdout)
        self.assertNotIn('Traceback', stderr)
        self.assertNotEqual(0, returncode)
        self.assertTrue(self._stage_exists('PerformanceFailureCheck',
                                           ['login'], self.environs))
        self.assertTrue(self._perflog_exists('PerformanceFailureCheck',
                                             ['login']))

    def test_skip_system_check_option(self):
        self.checkpath = ['unittests/resources/frontend_checks.py']
        self.more_options = ['--skip-system-check', '-t', 'NoSystemCheck']
        returncode, stdout, _ = self._run_reframe()
        self.assertIn('PASSED', stdout)

    def test_skip_prgenv_check_option(self):
        self.checkpath = ['unittests/resources/frontend_checks.py']
        self.more_options = ['--skip-prgenv-check', '-t', 'NoPrgEnvCheck']
        returncode, stdout, _ = self._run_reframe()
        self.assertIn('PASSED', stdout)
        self.assertEqual(0, returncode)

    def test_sanity_of_checks(self):
        # This test will effectively load all the tests in the checks path
        # and will force a syntactic and runtime check at least for the
        # constructor of the checks
        self.action = 'list'
        self.more_options = ['--save-log-files']
        self.checkpath = []
        returncode, *_ = self._run_reframe()
        self.assertEqual(0, returncode)
        self.assert_log_file_is_saved()

    def test_unknown_system(self):
        self.action = 'list'
        self.system = 'foo'
        self.checkpath = []
        returncode, stdout, stderr = self._run_reframe()
        self.assertNotIn('Traceback', stdout)
        self.assertNotIn('Traceback', stderr)
        self.assertEqual(1, returncode)

    def test_sanity_of_optconfig(self):
        # Test the sanity of the command line options configuration
        self.action = 'help'
        self.checkpath = []
        returncode, *_ = self._run_reframe()
        self.assertEqual(0, returncode)

    def test_checkpath_recursion(self):
        self.action = 'list'
        self.checkpath = []
        returncode, stdout, _ = self._run_reframe()
        num_checks_default = re.search(r'Found (\d+) check', stdout,
                                       re.MULTILINE).group(1)

        self.checkpath = ['checks/']
        self.more_options = ['-R']
        returncode, stdout, _ = self._run_reframe()
        num_checks_in_checkdir = re.search(r'Found (\d+) check', stdout,
                                           re.MULTILINE).group(1)
        self.assertEqual(num_checks_in_checkdir, num_checks_default)

        self.more_options = []
        returncode, stdout, stderr = self._run_reframe()
        num_checks_in_checkdir = re.search(r'Found (\d+) check', stdout,
                                           re.MULTILINE).group(1)
        self.assertEqual('0', num_checks_in_checkdir)

    def test_same_output_stage_dir(self):
        output_dir = os.path.join(self.prefix, 'foo')
        self.more_options = ['-o', output_dir, '-s', output_dir]
        returncode, *_ = self._run_reframe()
        self.assertEqual(1, returncode)

        # retry with --keep-stage-files
        self.more_options.append('--keep-stage-files')
        returncode, *_ = self._run_reframe()
        self.assertEqual(0, returncode)
        self.assertTrue(os.path.exists(output_dir))

    def test_execution_modes(self):
        self.checkpath = []
        self.environs = []
        self.local = False
        self.mode = 'unittest'
        returncode, stdout, stderr = self._run_reframe()
        self.assertNotIn('Traceback', stdout)
        self.assertNotIn('Traceback', stderr)
        self.assertNotIn('FAILED', stdout)
        self.assertIn('PASSED', stdout)
        self.assertIn('Ran 1 test case', stdout)

    def test_unknown_modules_system(self):
        fixtures.generate_test_config(self.config_file,
                                      logfile=self.logfile,
                                      modules_system="'foo'")
        returncode, stdout, stderr = self._run_reframe()
        self.assertNotEqual(0, returncode)

    def test_no_ignore_check_conflicts(self):
        self.checkpath = ['unittests/resources']
        self.more_options = ['-R']
        self.ignore_check_conflicts = False
        self.action = 'list'
        returncode, *_ = self._run_reframe()
        self.assertNotEqual(0, returncode)
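# With the setUp() defaults above, the argv property assembles a command
# line equivalent to the following (the prefix and config paths are the
# generated temporaries):
#
#   ./bin/reframe --prefix <tmpdir> --nocolor --system generic:login \
#       -C <generated_config>.py -c unittests/resources/hellocheck.py \
#       -p builtin-gcc --force-local -r --ignore-check-conflicts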
def setup_from_site(self):
    self.partition = fixtures.partition_with_scheduler(None)

    # pick the first environment of partition
    if self.partition.environs:
        self.progenv = self.partition.environs[0]
class TestRegressionTest(unittest.TestCase):
    def setUp(self):
        get_modules_system().searchpath_add(fixtures.TEST_MODULES)

        # Load a system configuration
        self.system, self.partition, self.progenv = fixtures.get_test_config()
        self.resourcesdir = tempfile.mkdtemp(dir='unittests')
        self.loader = RegressionCheckLoader(['unittests/resources'])
        self.resources = ResourcesManager(prefix=self.resourcesdir)

    def tearDown(self):
        shutil.rmtree(self.resourcesdir, ignore_errors=True)

    def setup_from_site(self):
        self.partition = fixtures.partition_with_scheduler(None)

        # pick the first environment of partition
        if self.partition.environs:
            self.progenv = self.partition.environs[0]

    def replace_prefix(self, filename, new_prefix):
        basename = os.path.basename(filename)
        return os.path.join(new_prefix, basename)

    def keep_files_list(self, test, compile_only=False):
        from reframe.core.deferrable import evaluate

        ret = [self.replace_prefix(evaluate(test.stdout), test.outputdir),
               self.replace_prefix(evaluate(test.stderr), test.outputdir)]
        if not compile_only:
            ret.append(self.replace_prefix(test.job.script_filename,
                                           test.outputdir))

        ret.extend([self.replace_prefix(f, test.outputdir)
                    for f in test.keep_files])
        return ret

    def test_environ_setup(self):
        test = self.loader.load_from_file(
            'unittests/resources/hellocheck.py',
            system=self.system, resources=self.resources)[0]

        # Use test environment for the regression check
        test.valid_prog_environs = [self.progenv.name]
        test.modules = ['testmod_foo']
        test.variables = {'_FOO_': '1', '_BAR_': '2'}
        test.local = True
        test.setup(self.partition, self.progenv)
        for m in test.modules:
            self.assertTrue(get_modules_system().is_module_loaded(m))

        for k, v in test.variables.items():
            self.assertEqual(os.environ[k], v)

        # Manually unload the environment
        self.progenv.unload()

    def _run_test(self, test, compile_only=False):
        test.setup(self.partition, self.progenv)
        test.compile()
        test.run()
        test.wait()
        test.check_sanity()
        test.check_performance()
        test.cleanup(remove_files=True)
        self.assertFalse(os.path.exists(test.stagedir))
        for f in self.keep_files_list(test, compile_only):
            self.assertTrue(os.path.exists(f))

    @unittest.skipIf(not fixtures.partition_with_scheduler(None),
                     'job submission not supported')
    def test_hellocheck(self):
        self.setup_from_site()
        test = self.loader.load_from_file(
            'unittests/resources/hellocheck.py',
            system=self.system, resources=self.resources)[0]

        # Use test environment for the regression check
        test.valid_prog_environs = [self.progenv.name]
        self._run_test(test)

    @unittest.skipIf(not fixtures.partition_with_scheduler(None),
                     'job submission not supported')
    def test_hellocheck_make(self):
        self.setup_from_site()
        test = self.loader.load_from_file(
            'unittests/resources/hellocheck_make.py',
            system=self.system, resources=self.resources)[0]

        # Use test environment for the regression check
        test.valid_prog_environs = [self.progenv.name]
        self._run_test(test)

    def test_hellocheck_local(self):
        test = self.loader.load_from_file(
            'unittests/resources/hellocheck.py',
            system=self.system, resources=self.resources)[0]

        # Use test environment for the regression check
        test.valid_prog_environs = [self.progenv.name]

        # Test also the prebuild/postbuild functionality
        test.prebuild_cmd = ['touch prebuild']
        test.postbuild_cmd = ['touch postbuild']
        test.keepfiles = ['prebuild', 'postbuild']

        # Force local execution of the test
        test.local = True
        self._run_test(test)

    def test_hellocheck_local_slashes(self):
        # Try to fool path creation by adding slashes to the environment
        # and partition names
        from reframe.core.environments import ProgEnvironment

        self.progenv = ProgEnvironment('bad/name', self.progenv.modules,
                                       self.progenv.variables)

        # That's a bit hacky, but we are in a unit test
        self.system._name += os.sep + 'bad'
        self.partition._name += os.sep + 'bad'
        self.test_hellocheck_local()

    def test_hellocheck_local_prepost_run(self):
        @sn.sanity_function
        def stagedir(test):
            return test.stagedir

        test = self.loader.load_from_file(
            'unittests/resources/hellocheck.py',
            system=self.system, resources=self.resources)[0]

        # Use test environment for the regression check
        test.valid_prog_environs = [self.progenv.name]

        # Test also the pre_run/post_run functionality
        test.pre_run = ['echo prerun: `pwd`']
        test.post_run = ['echo postrun: `pwd`']
        pre_run_path = sn.extractsingle(r'^prerun: (\S+)', test.stdout, 1)
        post_run_path = sn.extractsingle(r'^postrun: (\S+)', test.stdout, 1)
        test.sanity_patterns = sn.all([
            sn.assert_eq(stagedir(test), pre_run_path),
            sn.assert_eq(stagedir(test), post_run_path),
        ])

        # Force local execution of the test
        test.local = True
        self._run_test(test)

    def test_run_only_sanity(self):
        test = RunOnlyRegressionTest('runonlycheck', 'unittests/resources',
                                     resources=self.resources,
                                     system=self.system)
        test.executable = './hello.sh'
        test.executable_opts = ['Hello, World!']
        test.local = True
        test.valid_prog_environs = ['*']
        test.valid_systems = ['*']
        test.sanity_patterns = sn.assert_found(r'Hello, World\!',
                                               test.stdout)
        self._run_test(test)

    def test_compile_only_failure(self):
        test = CompileOnlyRegressionTest('compileonlycheck',
                                         'unittests/resources',
                                         resources=self.resources,
                                         system=self.system)
        test.sourcepath = 'compiler_failure.c'
        test.valid_prog_environs = [self.progenv.name]
        test.valid_systems = [self.system.name]
        test.setup(self.partition, self.progenv)
        self.assertRaises(CompilationError, test.compile)

    def test_compile_only_warning(self):
        test = CompileOnlyRegressionTest('compileonlycheckwarning',
                                         'unittests/resources',
                                         resources=self.resources,
                                         system=self.system)
        test.sourcepath = 'compiler_warning.c'
        self.progenv.cflags = '-Wall'
        test.valid_prog_environs = [self.progenv.name]
        test.valid_systems = [self.system.name]
        test.sanity_patterns = sn.assert_found(r'warning', test.stderr)
        self._run_test(test, compile_only=True)

    def test_supports_system(self):
        test = self.loader.load_from_file(
            'unittests/resources/hellocheck.py',
            system=self.system, resources=self.resources)[0]

        test.valid_systems = ['*']
        self.assertTrue(test.supports_system('gpu'))
        self.assertTrue(test.supports_system('login'))
        self.assertTrue(test.supports_system('testsys:gpu'))
        self.assertTrue(test.supports_system('testsys:login'))

        test.valid_systems = ['testsys']
        self.assertTrue(test.supports_system('gpu'))
        self.assertTrue(test.supports_system('login'))
        self.assertTrue(test.supports_system('testsys:gpu'))
        self.assertTrue(test.supports_system('testsys:login'))

        test.valid_systems = ['testsys:gpu']
        self.assertTrue(test.supports_system('gpu'))
        self.assertFalse(test.supports_system('login'))
        self.assertTrue(test.supports_system('testsys:gpu'))
        self.assertFalse(test.supports_system('testsys:login'))

        test.valid_systems = ['testsys:login']
        self.assertFalse(test.supports_system('gpu'))
        self.assertTrue(test.supports_system('login'))
        self.assertFalse(test.supports_system('testsys:gpu'))
        self.assertTrue(test.supports_system('testsys:login'))

        test.valid_systems = ['foo']
        self.assertFalse(test.supports_system('gpu'))
        self.assertFalse(test.supports_system('login'))
        self.assertFalse(test.supports_system('testsys:gpu'))
        self.assertFalse(test.supports_system('testsys:login'))

    def test_supports_environ(self):
        test = self.loader.load_from_file(
            'unittests/resources/hellocheck.py',
            system=self.system, resources=self.resources)[0]

        test.valid_prog_environs = ['*']
        self.assertTrue(test.supports_environ('foo1'))
        self.assertTrue(test.supports_environ('foo-env'))
        self.assertTrue(test.supports_environ('*'))

        test.valid_prog_environs = ['PrgEnv-foo-*']
        self.assertTrue(test.supports_environ('PrgEnv-foo-version1'))
        self.assertTrue(test.supports_environ('PrgEnv-foo-version2'))
        self.assertFalse(test.supports_environ('PrgEnv-boo-version1'))
        self.assertFalse(test.supports_environ('Prgenv-foo-version1'))

    def test_sourcesdir_none(self):
        test = RegressionTest('hellocheck', 'unittests/resources',
                              resources=self.resources, system=self.system)
        test.sourcesdir = None
        test.valid_prog_environs = ['*']
        test.valid_systems = ['*']
        self.assertRaises(ReframeError, self._run_test, test)

    def test_sourcesdir_none_generated_sources(self):
        test = RegressionTest('hellocheck_generated_sources',
                              'unittests/resources',
                              resources=self.resources, system=self.system)
        test.sourcesdir = None
        test.prebuild_cmd = ["printf '#include <stdio.h>\\n int main(){ "
                             "printf(\"Hello, World!\\\\n\"); return 0; }' "
                             "> hello.c"]
        test.executable = './hello'
        test.sourcepath = 'hello.c'
        test.local = True
        test.valid_systems = ['*']
        test.valid_prog_environs = ['*']
        test.sanity_patterns = sn.assert_found(r'Hello, World\!',
                                               test.stdout)
        self._run_test(test)

    def test_sourcesdir_none_compile_only(self):
        test = CompileOnlyRegressionTest('hellocheck',
                                         'unittests/resources',
                                         resources=self.resources,
                                         system=self.system)
        test.sourcesdir = None
        test.valid_prog_environs = ['*']
        test.valid_systems = ['*']
        self.assertRaises(CompilationError, self._run_test, test)

    def test_sourcesdir_none_run_only(self):
        test = RunOnlyRegressionTest('hellocheck', 'unittests/resources',
                                     resources=self.resources,
                                     system=self.system)
        test.sourcesdir = None
        test.executable = 'echo'
        test.executable_opts = ["Hello, World!"]
        test.local = True
        test.valid_prog_environs = ['*']
        test.valid_systems = ['*']
        test.sanity_patterns = sn.assert_found(r'Hello, World\!',
                                               test.stdout)
        self._run_test(test)

    def test_sourcepath_abs(self):
        test = CompileOnlyRegressionTest('compileonlycheck',
                                         'unittests/resources',
                                         resources=self.resources,
                                         system=self.system)
        test.valid_prog_environs = [self.progenv.name]
        test.valid_systems = [self.system.name]
        test.setup(self.partition, self.progenv)
        test.sourcepath = '/usr/src'
        self.assertRaises(PipelineError, test.compile)

    def test_sourcepath_upref(self):
        test = CompileOnlyRegressionTest('compileonlycheck',
                                         'unittests/resources',
                                         resources=self.resources,
                                         system=self.system)
        test.valid_prog_environs = [self.progenv.name]
        test.valid_systems = [self.system.name]
        test.setup(self.partition, self.progenv)
        test.sourcepath = '../hellosrc'
        self.assertRaises(PipelineError, test.compile)

    def test_extra_resources(self):
        # Load test site configuration
        system, partition, progenv = fixtures.get_test_config()
        test = RegressionTest('dummycheck', 'unittests/resources',
                              resources=self.resources, system=self.system)
        test.valid_prog_environs = ['*']
        test.valid_systems = ['*']
        test.extra_resources = {
            'gpu': {'num_gpus_per_node': 2},
            'datawarp': {'capacity': '100GB', 'stagein_src': '/foo'}
        }
        test.setup(self.partition, self.progenv)
        test.job.options += ['--foo']
        expected_job_options = ['--gres=gpu:2',
                                '#DW jobdw capacity=100GB',
                                '#DW stage_in source=/foo',
                                '--foo']
        self.assertCountEqual(expected_job_options, test.job.options)
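# The test_extra_resources() case above presumes a test site configuration
# that declares per-partition resources with placeholder substitution, along
# these (illustrative, not the actual fixture config) lines:
#
#   'resources': {
#       'gpu': ['--gres=gpu:{num_gpus_per_node}'],
#       'datawarp': ['#DW jobdw capacity={capacity}',
#                    '#DW stage_in source={stagein_src}'],
#   }
#
# Each entry of test.extra_resources is substituted into the matching
# template, producing the expected_job_options asserted at the end.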
class TestFrontend(unittest.TestCase):
    @property
    def argv(self):
        ret = ['./bin/reframe', '--prefix', self.prefix, '--nocolor']
        if self.mode:
            ret += ['--mode', self.mode]

        if self.system:
            ret += ['--system', self.system]

        ret += itertools.chain(*(['-c', c] for c in self.checkpath))
        ret += itertools.chain(*(['-p', e] for e in self.environs))
        if self.local:
            ret += ['--force-local']

        if self.action == 'run':
            ret += ['-r']
        elif self.action == 'list':
            ret += ['-l']
        elif self.action == 'help':
            ret += ['-h']

        ret += self.more_options
        return ret

    def setUp(self):
        self.prefix = tempfile.mkdtemp(dir='unittests')
        self.system = 'generic:login'
        self.checkpath = ['unittests/resources/hellocheck.py']
        self.environs = ['builtin-gcc']
        self.local = True
        self.action = 'run'
        self.more_options = []
        self.mode = None

        # Monkey patch logging configuration
        self.logfile = os.path.join(self.prefix, 'reframe.log')
        settings._logging_config = {
            'level': 'DEBUG',
            'handlers': {
                self.logfile: {
                    'level': 'DEBUG',
                    'format': '[%(asctime)s] %(levelname)s: '
                              '%(check_name)s: %(message)s',
                    'datefmt': '%FT%T',
                    'append': False,
                },
                '&1': {
                    'level': 'INFO',
                    'format': '%(message)s'
                },
            }
        }

        # Monkey patch site configuration setting a mode
        settings._site_configuration['modes'] = {
            '*': {
                'unittest': [
                    '-c', 'unittests/resources/hellocheck.py',
                    '-p', 'builtin-gcc',
                    '--force-local'
                ]
            }
        }

    def _run_reframe(self):
        import reframe.frontend.cli as cli
        return run_command_inline(self.argv, cli.main)

    def _stage_exists(self, check_name, partitions, environs):
        stagedir = os.path.join(self.prefix, 'stage')
        for p in partitions:
            for e in environs:
                path = os.path.join(stagedir, p, check_name, e)
                if not os.path.exists(path):
                    return False

        return True

    def _perflog_exists(self, check_name, partitions):
        logdir = os.path.join(self.prefix, 'logs')
        for p in partitions:
            logfile = os.path.join(logdir, p, check_name + '.log')
            if not os.path.exists(logfile):
                return False

        return True

    def assert_log_file_is_saved(self):
        outputdir = os.path.join(self.prefix, 'output')
        self.assertTrue(os.path.exists(self.logfile))
        self.assertTrue(os.path.exists(
            os.path.join(outputdir, os.path.basename(self.logfile))))

    def test_check_success(self):
        self.more_options = ['--save-log-files']
        returncode, stdout, _ = self._run_reframe()
        self.assertNotIn('FAILED', stdout)
        self.assertIn('PASSED', stdout)
        self.assertEqual(0, returncode)
        self.assert_log_file_is_saved()

    @unittest.skipIf(not fixtures.partition_with_scheduler(None),
                     'job submission not supported')
    def test_check_submit_success(self):
        # This test will run on the auto-detected system
        system = fixtures.HOST
        partition = fixtures.partition_with_scheduler(None)
        init_modules_system(system.modules_system)
        self.local = False
        self.system = partition.fullname

        # pick up the programming environment of the partition
        self.environs = [partition.environs[0].name]

        returncode, stdout, _ = self._run_reframe()
        self.assertNotIn('FAILED', stdout)
        self.assertIn('PASSED', stdout)
        self.assertEqual(0, returncode)

    def test_check_failure(self):
        self.checkpath = ['unittests/resources/frontend_checks.py']
        self.more_options = ['-t', 'BadSetupCheck']
        returncode, stdout, _ = self._run_reframe()
        self.assertIn('FAILED', stdout)
        self.assertNotEqual(returncode, 0)

    def test_check_setup_failure(self):
        self.checkpath = ['unittests/resources/frontend_checks.py']
        self.more_options = ['-t', 'BadSetupCheckEarlyNonLocal']
        self.local = False
        returncode, stdout, stderr = self._run_reframe()
        self.assertNotIn('Traceback', stderr)
        self.assertIn('FAILED', stdout)
        self.assertNotEqual(returncode, 0)

    def test_check_sanity_failure(self):
        self.checkpath = ['unittests/resources/frontend_checks.py']
        self.more_options = ['-t', 'SanityFailureCheck']
        returncode, stdout, stderr = self._run_reframe()
        self.assertIn('FAILED', stdout)

        # This is a normal failure, it should not raise any exception
        self.assertNotIn('Traceback', stderr)
        self.assertNotEqual(returncode, 0)
        self.assertTrue(self._stage_exists('SanityFailureCheck',
                                           ['login'], self.environs))

    def test_performance_check_failure(self):
        self.checkpath = ['unittests/resources/frontend_checks.py']
        self.more_options = ['-t', 'PerformanceFailureCheck']
        returncode, stdout, stderr = self._run_reframe()
        self.assertIn('FAILED', stdout)

        # This is a normal failure, it should not raise any exception
        self.assertNotIn('Traceback', stderr)
        self.assertNotEqual(0, returncode)
        self.assertTrue(self._stage_exists('PerformanceFailureCheck',
                                           ['login'], self.environs))
        self.assertTrue(self._perflog_exists('PerformanceFailureCheck',
                                             ['login']))

    def test_skip_system_check_option(self):
        self.checkpath = ['unittests/resources/frontend_checks.py']
        self.more_options = ['--skip-system-check', '-t', 'NoSystemCheck']
        returncode, stdout, _ = self._run_reframe()
        self.assertIn('PASSED', stdout)

    def test_skip_prgenv_check_option(self):
        self.checkpath = ['unittests/resources/frontend_checks.py']
        self.more_options = ['--skip-prgenv-check', '-t', 'NoPrgEnvCheck']
        returncode, stdout, _ = self._run_reframe()
        self.assertIn('PASSED', stdout)
        self.assertEqual(0, returncode)

    def test_sanity_of_checks(self):
        # This test will effectively load all the tests in the checks path
        # and will force a syntactic and runtime check at least for the
        # constructor of the checks
        self.action = 'list'
        self.more_options = ['--save-log-files']
        self.checkpath = []
        returncode, *_ = self._run_reframe()
        self.assertEqual(0, returncode)
        self.assert_log_file_is_saved()

    def test_unknown_system(self):
        self.action = 'list'
        self.system = 'foo'
        self.checkpath = []
        returncode, stdout, stderr = self._run_reframe()
        self.assertNotIn('Traceback', stdout)
        self.assertNotIn('Traceback', stderr)
        self.assertEqual(1, returncode)

    def test_sanity_of_optconfig(self):
        # Test the sanity of the command line options configuration
        self.action = 'help'
        self.checkpath = []
        returncode, *_ = self._run_reframe()
        self.assertEqual(0, returncode)

    def test_checkpath_recursion(self):
        self.action = 'list'
        self.checkpath = []
        returncode, stdout, _ = self._run_reframe()
        num_checks_default = re.search(r'Found (\d+) check', stdout,
                                       re.MULTILINE).group(1)

        self.checkpath = ['checks/']
        self.more_options = ['-R']
        returncode, stdout, _ = self._run_reframe()
        num_checks_in_checkdir = re.search(r'Found (\d+) check', stdout,
                                           re.MULTILINE).group(1)
        self.assertEqual(num_checks_in_checkdir, num_checks_default)

        self.more_options = []
        returncode, stdout, stderr = self._run_reframe()
        num_checks_in_checkdir = re.search(r'Found (\d+) check', stdout,
                                           re.MULTILINE).group(1)
        self.assertEqual('0', num_checks_in_checkdir)

    def test_same_output_stage_dir(self):
        output_dir = os.path.join(self.prefix, 'foo')
        self.more_options = ['-o', output_dir, '-s', output_dir]
        returncode, *_ = self._run_reframe()
        self.assertEqual(1, returncode)

        # retry with --keep-stage-files
        self.more_options.append('--keep-stage-files')
        returncode, *_ = self._run_reframe()
        self.assertEqual(0, returncode)
        self.assertTrue(os.path.exists(output_dir))

    def test_execution_modes(self):
        self.checkpath = []
        self.environs = []
        self.local = False
        self.mode = 'unittest'
        returncode, stdout, stderr = self._run_reframe()
        self.assertNotIn('Traceback', stderr)
        self.assertNotIn('FAILED', stdout)
        self.assertIn('PASSED', stdout)
        self.assertIn('Ran 1 test case', stdout)

    def tearDown(self):
        shutil.rmtree(self.prefix)