def run_command_inline(argv, funct, *args, **kwargs):
    """Invoke *funct* as if it had been launched from the command line.

    ``sys.argv`` is temporarily replaced with *argv* and the function runs
    inside a temporary runtime with stdout/stderr captured.  The previous
    ``sys.argv`` and environment are restored afterwards.

    :returns: a ``(exitcode, stdout, stderr)`` tuple.
    """
    saved_argv = sys.argv
    saved_environ = EnvironmentSnapshot()
    sys.argv = argv
    exitcode = None
    stdout_buf = StringIO()
    stderr_buf = StringIO()

    # Echo the command line being simulated (to the real stdout, since the
    # redirection has not started yet).
    print(*sys.argv)
    with redirect_stdout(stdout_buf), redirect_stderr(stderr_buf):
        try:
            with rt.temp_runtime(None):
                exitcode = funct(*args, **kwargs)
        except SystemExit as e:
            exitcode = e.code
        finally:
            # Restore the saved execution context.
            saved_environ.load()
            sys.argv = saved_argv

    return (exitcode, stdout_buf.getvalue(), stderr_buf.getvalue())
def __init__(self, policy, printer=None, max_retries=0):
    """Create a runner driving the given execution *policy*.

    :arg policy: the execution policy that will run the checks.
    :arg printer: output printer; a ``PrettyPrinter`` is created when
        ``None`` (or any falsy value) is passed.
    :arg max_retries: how many times failed checks may be retried.
    """
    self._policy = policy
    self._printer = printer or PrettyPrinter()
    self._max_retries = max_retries
    self._stats = TestStats()
    self._sandbox = Sandbox()
    self._environ_snapshot = EnvironmentSnapshot()

    # Hand our statistics and printer over to the policy, so that both
    # sides report through the same objects.
    self._policy.stats = self._stats
    self._policy.printer = self._printer
class _TestModulesSystem(unittest.TestCase):
    """Exercise the configured modules system against the test modules."""

    def setUp(self):
        self.modules_system = modules.get_modules_system()
        self.environ_save = EnvironmentSnapshot()
        self.modules_system.searchpath_add(TEST_MODULES)

    def tearDown(self):
        # Roll back any environment changes made by the test.
        self.environ_save.load()

    def test_searchpath(self):
        ms = self.modules_system
        self.assertIn(TEST_MODULES, ms.searchpath)
        ms.searchpath_remove(TEST_MODULES)
        self.assertNotIn(TEST_MODULES, ms.searchpath)

    def test_module_load(self):
        ms = self.modules_system
        with self.assertRaises(ModuleError):
            ms.load_module('foo')

        self.assertFalse(ms.is_module_loaded('foo'))
        self.assertNotIn('foo', ms.loaded_modules())

        ms.load_module('testmod_foo')
        self.assertTrue(ms.is_module_loaded('testmod_foo'))
        self.assertIn('testmod_foo', ms.loaded_modules())
        self.assertIn('TESTMOD_FOO', os.environ)

        ms.unload_module('testmod_foo')
        self.assertFalse(ms.is_module_loaded('testmod_foo'))
        self.assertNotIn('testmod_foo', ms.loaded_modules())
        self.assertNotIn('TESTMOD_FOO', os.environ)

    def test_module_load_force(self):
        ms = self.modules_system
        ms.load_module('testmod_foo')

        # Force-loading an already loaded module must unload nothing.
        unloaded = ms.load_module('testmod_foo', force=True)
        self.assertEqual(len(unloaded), 0)
        self.assertTrue(ms.is_module_loaded('testmod_foo'))

        # Force-loading a conflicting module unloads the conflicting one.
        unloaded = ms.load_module('testmod_bar', True)
        self.assertTrue(ms.is_module_loaded('testmod_bar'))
        self.assertFalse(ms.is_module_loaded('testmod_foo'))
        self.assertIn('testmod_foo', unloaded)
        self.assertIn('TESTMOD_BAR', os.environ)

    def test_module_unload_all(self):
        ms = self.modules_system
        ms.load_module('testmod_base')
        ms.unload_all()
        self.assertEqual(len(ms.loaded_modules()), 0)

    def test_module_list(self):
        ms = self.modules_system
        ms.load_module('testmod_foo')
        self.assertIn('testmod_foo', ms.loaded_modules())
        ms.unload_module('testmod_foo')

    def test_module_conflict_list(self):
        conflicts = self.modules_system.conflicted_modules('testmod_bar')
        self.assertIn('testmod_foo', conflicts)
        self.assertIn('testmod_boo', conflicts)
def _setup_environ(self, environ):
    """Set up the current environment and load it.

    Loads, in order, the partition's local environment, the programming
    environment and the user's environment, so that the actual module
    load/unload sequence gets recorded, and then restores the environment
    that was active on entry.
    """
    self._current_environ = environ

    # Build the environment requested by the user (modules + variables).
    self._user_environ = Environment(type(self).__name__,
                                     self.modules,
                                     self.variables.items())

    # Snapshot the current environment: the loads below are only meant to
    # record the module operations, so we roll back afterwards.
    env_snapshot = EnvironmentSnapshot()
    self.logger.debug('loading environment for the current partition')
    self._current_partition.local_env.load()
    self.logger.debug("loading current programming environment")
    self._current_environ.load()
    self.logger.debug("loading user's environment")
    self._user_environ.load()
    env_snapshot.load()
def run_command_inline(argv, funct, *args, **kwargs):
    """Invoke *funct* as if it had been launched from the command line.

    ``sys.argv`` is temporarily replaced with *argv* and stdout/stderr are
    captured while the function runs.  The previous ``sys.argv``, the
    environment and the native modules system are all restored afterwards.

    :returns: a ``(exitcode, stdout, stderr)`` tuple.
    """
    saved_argv = sys.argv
    snapshot = EnvironmentSnapshot()
    out_buf = StringIO()
    err_buf = StringIO()
    sys.argv = argv
    exitcode = None

    # Echo the command line being simulated (before redirection starts).
    print(' '.join(argv))
    with redirect_stdout(out_buf), redirect_stderr(err_buf):
        try:
            exitcode = funct(*args, **kwargs)
        except SystemExit as e:
            exitcode = e.code
        finally:
            # Restore environment, command-line arguments, and the native
            # modules system.
            snapshot.load()
            sys.argv = saved_argv
            fixtures.init_native_modules_system()

    return (exitcode, out_buf.getvalue(), err_buf.getvalue())
def run_check(self, check, partition, environ):
    """Set up *check* for *partition*/*environ* and schedule it for running.

    If the partition's concurrency limit allows it, the test case is
    rescheduled immediately; otherwise it is put on ``HOLD`` and queued in
    ``self._ready_cases``.  In all cases the policy's environment snapshot
    is reloaded at the end, undoing whatever the setup loaded.
    """
    try:
        executor = RegressionTestExecutor(check, self.strict_check)
        testcase = TestCase(executor)
        executor.setup(partition=partition,
                       environ=environ,
                       sched_account=self.sched_account,
                       sched_partition=self.sched_partition,
                       sched_reservation=self.sched_reservation,
                       sched_nodelist=self.sched_nodelist,
                       sched_exclude_nodelist=self.sched_exclude_nodelist,
                       sched_options=self.sched_options)

        # Pair the test case with a snapshot of the environment just set up,
        # so it can be reloaded when the case is eventually run.
        ready_testcase = RunningTestCase(testcase, EnvironmentSnapshot())
        partname = partition.fullname
        if self._running_cases_counts[partname] >= partition.max_jobs:
            # Make sure that we still exceeded the job limit
            getlogger().debug('reached job limit (%s) for partition %s' %
                              (partition.max_jobs, partname))
            self._update_running_counts()

        if self._running_cases_counts[partname] < partition.max_jobs:
            # Test's environment is already loaded; no need to be reloaded
            self._reschedule(ready_testcase, load_env=False)
        else:
            self._print_executor_status('HOLD', executor)
            self._ready_cases[partname].append(ready_testcase)
    except (KeyboardInterrupt, ReframeFatalError, AssertionError):
        # Fatal conditions: mark this case failed, fail everything else and
        # re-raise to abort the run.
        if not testcase.failed():
            # test case failed during setup
            testcase.fail(sys.exc_info())

        self._failall()
        raise
    except:
        # NOTE(review): deliberately bare -- catches everything not handled
        # above.
        # Here we are sure that test case has failed during setup, since
        # _compile_and_run() handles already non-fatal exceptions. Though
        # we check again the testcase, just in case.
        if not testcase.failed():
            testcase.fail(sys.exc_info())
    finally:
        if testcase.valid() and testcase.failed_stage == 'setup':
            # We need to print the result here only if the setup stage has
            # finished, since otherwise _compile_and_run() prints it
            self.printer.result(executor.check, partition, environ,
                                not testcase.failed())

        self._test_cases.append(testcase)
        # Restore the environment that was active before this check's setup.
        self.environ_snapshot.load()
def __init__(self):
    """Initialize the execution policy with its default configuration."""
    # Options controlling the check execution
    self.skip_system_check = False
    self.force_local = False
    self.skip_environ_check = False
    self.skip_sanity_check = False
    self.skip_performance_check = False
    self.keep_stage_files = False
    # Restrict runs to these environments; None means no restriction.
    self.only_environs = None
    # Output printer; expected to be set externally before the policy runs
    # -- TODO confirm against the runner.
    self.printer = None
    # Snapshot of the environment at policy creation; reloaded later to
    # restore the pre-run state.
    self.environ_snapshot = EnvironmentSnapshot()
    self.strict_check = False

    # Scheduler options
    self.sched_account = None
    self.sched_partition = None
    self.sched_reservation = None
    self.sched_nodelist = None
    self.sched_exclude_nodelist = None
    self.sched_options = []
def setup(self, *args, **kwargs):
    """Run the check's setup stage through ``_safe_call``.

    On success, capture a snapshot of the environment the setup produced,
    so that it can be reloaded later (e.g., when the task is resumed).
    """
    self._safe_call(self._check.setup, *args, **kwargs)
    self._environ = EnvironmentSnapshot()
class RegressionTask:
    """A class representing a :class:`RegressionTest` through the regression
    pipeline.

    Each pipeline stage is executed through :func:`_safe_call`, which
    records the current stage so that failures can be attributed to it and
    notifies the registered listeners on the relevant events.
    """

    def __init__(self, check, listeners=None):
        """Create a task for *check*.

        :arg check: the regression test driven by this task.
        :arg listeners: optional iterable of listener objects providing
            ``on_task_*`` callbacks; it is copied, never aliased.
        """
        # FIX: the original used a mutable default argument
        # (``listeners=[]``); although it was defensively copied, ``None``
        # is the safe and idiomatic sentinel.
        self._check = check
        self._failed_stage = None
        self._current_stage = None
        self._exc_info = (None, None, None)
        self._environ = None
        self._listeners = list(listeners) if listeners is not None else []

        # Test case has finished, but has not been waited for yet
        self.zombie = False

    @property
    def check(self):
        return self._check

    @property
    def exc_info(self):
        return self._exc_info

    @property
    def failed(self):
        # A task counts as failed only after fail() has recorded a stage.
        return self._failed_stage is not None

    @property
    def failed_stage(self):
        return self._failed_stage

    def _notify_listeners(self, callback_name):
        # Invoke the named callback on every registered listener.
        for l in self._listeners:
            callback = getattr(l, callback_name)
            callback(self)

    def _safe_call(self, fn, *args, **kwargs):
        """Run *fn*, recording the stage and converting errors to TaskExit.

        Abort reasons are re-raised as-is; any other exception marks the
        task failed and is chained into a :class:`TaskExit`.
        """
        self._current_stage = fn.__name__
        try:
            with logging.logging_context(self._check) as logger:
                logger.debug('entering stage: %s' % self._current_stage)
                return fn(*args, **kwargs)
        except ABORT_REASONS:
            self.fail()
            raise
        except BaseException as e:
            self.fail()
            raise TaskExit from e

    def setup(self, *args, **kwargs):
        # Snapshot the environment produced by setup, for resume().
        self._safe_call(self._check.setup, *args, **kwargs)
        self._environ = EnvironmentSnapshot()

    def compile(self):
        self._safe_call(self._check.compile)

    def run(self):
        self._safe_call(self._check.run)
        self._notify_listeners('on_task_run')

    def wait(self):
        self._safe_call(self._check.wait)
        self.zombie = False

    def poll(self):
        finished = self._safe_call(self._check.poll)
        if finished:
            # Finished but not yet waited for.
            self.zombie = True
            self._notify_listeners('on_task_exit')

        return finished

    def sanity(self):
        self._safe_call(self._check.sanity)

    def performance(self):
        self._safe_call(self._check.performance)

    def cleanup(self, *args, **kwargs):
        self._safe_call(self._check.cleanup, *args, **kwargs)
        self._notify_listeners('on_task_success')

    def fail(self, exc_info=None):
        """Mark the task failed at the current stage and notify listeners."""
        self._failed_stage = self._current_stage
        self._exc_info = exc_info or sys.exc_info()
        self._notify_listeners('on_task_failure')

    def resume(self):
        # Reload the environment captured at the end of setup().
        self._environ.load()

    def abort(self, cause=None):
        """Abort this task, cancelling its job if one has started.

        :arg cause: optional exception to chain as the abort's cause.
        """
        logging.getlogger().debug('aborting: %s' % self._check.info())
        exc = AbortTaskError()
        exc.__cause__ = cause
        try:
            # FIXME: we should perhaps extend the RegressionTest interface
            # for supporting job cancelling
            if not self.zombie and self._check.job:
                self._check.job.cancel()
        except JobNotStartedError:
            self.fail((type(exc), exc, None))
        except BaseException:
            self.fail()
        else:
            self.fail((type(exc), exc, None))
class Runner:
    """Responsible for executing a set of regression tests based on an
    execution policy."""

    def __init__(self, policy, printer=None, max_retries=0):
        """Create a runner driving *policy*.

        :arg policy: the execution policy that will run the checks.
        :arg printer: output printer; defaults to a ``PrettyPrinter``.
        :arg max_retries: how many times failed checks may be retried.
        """
        self._policy = policy
        self._printer = printer or PrettyPrinter()
        self._max_retries = max_retries
        self._current_run = 0
        self._stats = TestStats()
        # Share our stats and printer with the policy.
        self._policy.stats = self._stats
        self._policy.printer = self._printer
        self._sandbox = Sandbox()
        self._environ_snapshot = EnvironmentSnapshot()

    def __repr__(self):
        return debug.repr(self)

    @property
    def policy(self):
        return self._policy

    @property
    def stats(self):
        return self._stats

    def runall(self, checks, system):
        """Run all *checks* on *system*, retrying failures if configured.

        Always prints the summary line and restores the environment
        snapshot, even if the run aborts.
        """
        try:
            self._printer.separator('short double line',
                                    'Running %d check(s)' % len(checks))
            self._printer.timestamp('Started on', 'short double line')
            self._printer.info()
            self._runall(checks, system)
            if self._max_retries:
                self._retry_failed(checks, system)
        finally:
            # Print the summary line
            num_failures = self._stats.num_failures()
            num_cases = self._stats.num_cases(run=0)
            self._printer.status(
                'FAILED' if num_failures else 'PASSED',
                'Ran %d test case(s) from %d check(s) (%d failure(s))' %
                (num_cases, len(checks), num_failures), just='center'
            )
            self._printer.timestamp('Finished on', 'short double line')
            self._environ_snapshot.load()

    def _partition_supported(self, check, partition):
        if self._policy.skip_system_check:
            return True

        return check.supports_system(partition.name)

    def _environ_supported(self, check, environ):
        ret = True
        if self._policy.only_environs:
            ret = environ.name in self._policy.only_environs

        if self._policy.skip_environ_check:
            return ret
        else:
            return ret and check.supports_environ(environ.name)

    def _retry_failed(self, checks, system):
        """Keep re-running the failed checks until they pass or the retry
        limit is reached."""
        while (self._stats.num_failures() and
               self._current_run < self._max_retries):
            # FIX: build the failed-name set once; the original re-created
            # it inside the comprehension's condition for every check.
            failed_names = {t.check.name
                            for t in self._stats.tasks_failed()}
            failed_checks = [c for c in checks if c.name in failed_names]
            self._current_run += 1
            self._stats.next_run()
            if self._stats.current_run != self._current_run:
                # FIX: the original applied '%' to a single value (raising
                # TypeError at format time) and passed the second value as
                # an extra AssertionError argument.
                raise AssertionError(
                    'current_run variable out of sync '
                    '(Runner: %d; TestStats: %d)' %
                    (self._current_run, self._stats.current_run))

            self._printer.separator(
                'short double line',
                'Retrying %d failed check(s) (retry %d/%d)' %
                (len(failed_checks), self._current_run, self._max_retries)
            )
            self._runall(failed_checks, system)

    def _runall(self, checks, system):
        """Drive the policy over every supported (check, partition,
        environment) combination."""
        self._policy.enter()
        for c in checks:
            self._policy.enter_check(c)
            for p in system.partitions:
                if not self._partition_supported(c, p):
                    self._printer.status('SKIP',
                                         'skipping %s' % p.fullname,
                                         just='center',
                                         level=logging.VERBOSE)
                    continue

                self._policy.enter_partition(c, p)
                for e in p.environs:
                    if not self._environ_supported(c, e):
                        self._printer.status('SKIP',
                                             'skipping %s for %s' %
                                             (e.name, p.fullname),
                                             just='center',
                                             level=logging.VERBOSE)
                        continue

                    self._sandbox.system = p
                    self._sandbox.environ = e
                    self._sandbox.check = c
                    self._policy.enter_environ(self._sandbox.check,
                                               self._sandbox.system,
                                               self._sandbox.environ)
                    # Reset the environment before each run.
                    self._environ_snapshot.load()
                    self._policy.run_check(self._sandbox.check,
                                           self._sandbox.system,
                                           self._sandbox.environ)
                    self._policy.exit_environ(self._sandbox.check,
                                              self._sandbox.system,
                                              self._sandbox.environ)

                self._policy.exit_partition(c, p)

            self._policy.exit_check(c)

        self._policy.exit()
def setUp(self):
    # Snapshot the current environment so the test can restore it when it
    # finishes (presumably reloaded in tearDown -- confirm in the class).
    self.environ_save = EnvironmentSnapshot()
    # Make the test modules discoverable by the modules system under test.
    self.modules_system.searchpath_add(TEST_MODULES)
class _TestModulesSystem:
    """Common test battery for a modules system.

    Abstract mixin: ``self.modules_system`` is expected to be provided by
    the concrete subclasses, which must also implement
    :func:`expected_load_instr` and :func:`expected_unload_instr`
    (marked with ``abc.abstractmethod`` below).
    """

    def setUp(self):
        # Snapshot the environment so tearDown can restore it unmodified.
        self.environ_save = EnvironmentSnapshot()
        self.modules_system.searchpath_add(TEST_MODULES)

    def tearDown(self):
        # Roll back any environment changes made by the test.
        self.environ_save.load()

    def test_searchpath(self):
        self.assertIn(TEST_MODULES, self.modules_system.searchpath)
        self.modules_system.searchpath_remove(TEST_MODULES)
        self.assertNotIn(TEST_MODULES, self.modules_system.searchpath)

    def test_module_load(self):
        # Loading a non-existent module must raise and change nothing.
        self.assertRaises(EnvironError, self.modules_system.load_module,
                          'foo')
        self.assertFalse(self.modules_system.is_module_loaded('foo'))
        self.assertNotIn('foo', self.modules_system.loaded_modules())

        # Loading a test module sets its marker variable in the environment.
        self.modules_system.load_module('testmod_foo')
        self.assertTrue(self.modules_system.is_module_loaded('testmod_foo'))
        self.assertIn('testmod_foo', self.modules_system.loaded_modules())
        self.assertIn('TESTMOD_FOO', os.environ)

        # Unloading removes both the module and its marker variable.
        self.modules_system.unload_module('testmod_foo')
        self.assertFalse(self.modules_system.is_module_loaded('testmod_foo'))
        self.assertNotIn('testmod_foo', self.modules_system.loaded_modules())
        self.assertNotIn('TESTMOD_FOO', os.environ)

    def test_module_load_force(self):
        self.modules_system.load_module('testmod_foo')

        # Force-loading an already loaded module must unload nothing.
        unloaded = self.modules_system.load_module('testmod_foo', force=True)
        self.assertEqual(0, len(unloaded))
        self.assertTrue(self.modules_system.is_module_loaded('testmod_foo'))

        # Force-loading a conflicting module unloads the conflicting one and
        # reports it in the returned list.
        unloaded = self.modules_system.load_module('testmod_bar', force=True)
        self.assertTrue(self.modules_system.is_module_loaded('testmod_bar'))
        self.assertFalse(self.modules_system.is_module_loaded('testmod_foo'))
        self.assertIn('testmod_foo', unloaded)
        self.assertIn('TESTMOD_BAR', os.environ)

    def test_module_unload_all(self):
        self.modules_system.load_module('testmod_base')
        self.modules_system.unload_all()
        self.assertEqual(0, len(self.modules_system.loaded_modules()))

    def test_module_list(self):
        self.modules_system.load_module('testmod_foo')
        self.assertIn('testmod_foo', self.modules_system.loaded_modules())
        self.modules_system.unload_module('testmod_foo')

    def test_module_conflict_list(self):
        conflict_list = self.modules_system.conflicted_modules('testmod_bar')
        self.assertIn('testmod_foo', conflict_list)
        self.assertIn('testmod_boo', conflict_list)

    @abc.abstractmethod
    def expected_load_instr(self, module):
        """Expected load instruction."""

    @abc.abstractmethod
    def expected_unload_instr(self, module):
        """Expected unload instruction."""

    def test_emit_load_commands(self):
        # Map the virtual module m0 to two real modules.
        self.modules_system.module_map = {
            'm0': ['m1', 'm2']
        }
        self.assertEqual([self.expected_load_instr('foo')],
                         self.modules_system.emit_load_commands('foo'))
        self.assertEqual([self.expected_load_instr('foo/1.2')],
                         self.modules_system.emit_load_commands('foo/1.2'))
        # A mapped module expands to its targets, in mapping order.
        self.assertEqual([self.expected_load_instr('m1'),
                          self.expected_load_instr('m2')],
                         self.modules_system.emit_load_commands('m0'))

    def test_emit_unload_commands(self):
        self.modules_system.module_map = {
            'm0': ['m1', 'm2']
        }
        self.assertEqual([self.expected_unload_instr('foo')],
                         self.modules_system.emit_unload_commands('foo'))
        self.assertEqual([self.expected_unload_instr('foo/1.2')],
                         self.modules_system.emit_unload_commands('foo/1.2'))
        # Unloading a mapped module emits its targets in reverse order.
        self.assertEqual([self.expected_unload_instr('m2'),
                          self.expected_unload_instr('m1')],
                         self.modules_system.emit_unload_commands('m0'))