def run(self):
    """The run phase of the regression test pipeline.

    This call is non-blocking. It simply submits the job associated with
    this test and returns.

    :raises reframe.core.exceptions.PipelineError: If no system/partition
        is set or if the job script cannot be prepared.
    """
    # NOTE(review): `self.current_system` lacks the leading underscore used
    # by `self._current_partition` below -- presumably a public property
    # alias; confirm it exists on this class.
    if not self.current_system or not self._current_partition:
        raise PipelineError('no system or system partition is set')

    # Command line to launch the test: launcher prefix + executable +
    # user-supplied options.
    exec_cmd = [self.job.launcher.run_command(self.job),
                self.executable, *self.executable_opts]

    # User-defined pre/post commands wrap the main launch command.
    commands = [*self.pre_run, ' '.join(exec_cmd), *self.post_run]

    # Environments are emitted in order: partition-local environment,
    # programming environment, then any user-defined environment.
    environs = [self._current_partition.local_env, self._current_environ,
                self._user_environ]

    with os_ext.change_dir(self._stagedir):
        try:
            # Generate the job script as a login shell script.
            self._job.prepare(commands, environs, login=True)
        except OSError as e:
            raise PipelineError('failed to prepare job') from e

        self._job.submit()

    msg = ('spawned job (%s=%s)' %
           ('pid' if self.is_local() else 'jobid', self._job.jobid))
    self.logger.debug(msg)
def check_performance(self):
    """The performance checking phase of the regression test pipeline.

    :raises reframe.core.exceptions.SanityError: If the performance check
        fails.
    """
    # No performance patterns means nothing to check for this test.
    if self.perf_patterns is None:
        return

    with os_ext.change_dir(self._stagedir):
        # We first evaluate and log all performance values and then we
        # check them against the reference. This way we always log them
        # even if they don't meet the reference.
        perf_values = []
        for tag, expr in self.perf_patterns.items():
            value = evaluate(expr)
            key = '%s:%s' % (self._current_partition.fullname, tag)
            if key not in self.reference:
                raise SanityError(
                    "tag `%s' not resolved in references for `%s'" %
                    (tag, self._current_partition.fullname))

            perf_values.append((value, self.reference[key]))
            self._perf_logger.log_performance(logging.INFO, tag, value,
                                              *self.reference[key])

        for val, reference in perf_values:
            # Reference tuples may carry extra trailing fields (e.g. a
            # unit); only the first three are used for the check.
            ref, low_thres, high_thres, *_ = reference
            evaluate(assert_reference(val, ref, low_thres, high_thres))
def test_exception_propagation(self):
    """An exception raised inside change_dir must escape the context
    manager and still restore the original working directory."""
    try:
        with os_ext.change_dir(self.temp_dir):
            raise RuntimeError
    except RuntimeError:
        # The exception escaped; the cwd must be back to where we started.
        assert os.getcwd() == self.wd_save
        return

    pytest.fail('exception not propagated by the ctx manager')
def test_exception_propagation(self):
    """An exception raised inside change_dir must escape the context
    manager and still restore the original working directory."""
    try:
        with os_ext.change_dir(self.temp_dir):
            raise RuntimeError
    except RuntimeError:
        # The exception escaped; the cwd must be back to where we started.
        self.assertEqual(os.getcwd(), self.wd_save)
        return

    self.fail('exception not propagated by the ctx manager')
def test_exception_propagation(tmpdir):
    """An exception raised inside change_dir must escape the context
    manager and still restore the original working directory."""
    home = os.getcwd()
    try:
        with os_ext.change_dir(tmpdir):
            raise RuntimeError
    except RuntimeError:
        # The exception escaped; the cwd must be back to where we started.
        assert os.getcwd() == home
        return

    pytest.fail('exception not propagated by the ctx manager')
def _merge_files(self, job):
    """Merge the per-task output/error files of a job array into the
    job's main stdout/stderr files.

    Looks for files named ``<stdout>_*`` / ``<stderr>_*`` in the job's
    working directory and concatenates them into ``job.stdout`` /
    ``job.stderr``, overwriting any previous contents.
    """
    with os_ext.change_dir(job.workdir):
        out_glob = glob.glob(job.stdout + '_*')
        err_glob = glob.glob(job.stderr + '_*')
        getlogger().debug('merging job array output files: %s' %
                          ', '.join(out_glob))
        os_ext.concat_files(job.stdout, *out_glob, overwrite=True)

        # Fix: use the same `', '` separator as the stdout message above;
        # the original used a bare ',' here, producing inconsistent logs.
        getlogger().debug('merging job array error files: %s' %
                          ', '.join(err_glob))
        os_ext.concat_files(job.stderr, *err_glob, overwrite=True)
def finished(self, job):
    # Decide whether a PBS job has finished.  PBS writes the job's
    # stdout/stderr back only after the job ends, so their presence in
    # the work directory signals (possible) completion.
    with os_ext.change_dir(job.workdir):
        done = os.path.exists(job.stdout) and os.path.exists(job.stderr)
        if done:
            t_now = datetime.now()
            # Remember the first time we observed the output files.
            self._time_finished = self._time_finished or t_now
            time_from_finish = (t_now - self._time_finished).total_seconds()

        # NOTE: `time_from_finish` is only bound when `done` is True; the
        # `and` short-circuit below is what keeps this from raising.  A
        # grace period is enforced because PBS may still be writing the
        # files back.
        return done and time_from_finish > PBS_OUTPUT_WRITEBACK_WAIT
def test_load_relative(self):
    """Import modules through relative file paths."""
    with os_ext.change_dir('reframe'):
        # A module located one directory up
        mod = util.import_module_from_file('../reframe/__init__.py')
        self.assertEqual(reframe.VERSION, mod.VERSION)
        self.assertEqual('reframe', mod.__name__)
        self.assertIs(mod, sys.modules.get('reframe'))

        # A module below the current directory
        mod = util.import_module_from_file('utility/os_ext.py')
        self.assertEqual('reframe.utility.os_ext', mod.__name__)
        self.assertIs(mod, sys.modules.get('reframe.utility.os_ext'))
def check_sanity(self):
    """The sanity checking phase of the regression test pipeline.

    :raises reframe.core.exceptions.SanityError: If the sanity check
        fails.
    """
    # A test without sanity patterns cannot be validated at all.
    if self.sanity_patterns is None:
        raise SanityError('sanity_patterns not set')

    with os_ext.change_dir(self._stagedir):
        if not evaluate(self.sanity_patterns):
            raise SanityError()
def test_load_relative(self):
    """Import modules through relative file paths."""
    with os_ext.change_dir('reframe'):
        # A module located one directory up
        mod = util.import_module_from_file('../reframe/__init__.py')
        assert mod.VERSION == reframe.VERSION
        assert mod.__name__ == 'reframe'
        assert sys.modules.get('reframe') is mod

        # A module below the current directory
        mod = util.import_module_from_file('utility/os_ext.py')
        assert mod.__name__ == 'reframe.utility.os_ext'
        assert sys.modules.get('reframe.utility.os_ext') is mod
def test_concat_files(tmpdir):
    """concat_files must join the inputs with newline separators."""
    with os_ext.change_dir(tmpdir):
        sources = ['in1.txt', 'in2.txt']
        for fname, text in zip(sources, ('Hello1', 'Hello2')):
            with open(fname, 'w') as fp:
                fp.write(text)

        target = 'out.txt'
        os_ext.concat_files(target, *sources, overwrite=True)
        with open(target) as fp:
            assert fp.read() == 'Hello1\nHello2\n'
def read_timestamps(self, tasks):
    '''Read the timestamps and sort them to permit simple concurrency
    tests.'''
    from reframe.utility.sanity import evaluate

    self.begin_stamps = []
    self.end_stamps = []
    for task in tasks:
        with os_ext.change_dir(task.check.stagedir):
            with open(evaluate(task.check.stdout), 'r') as fp:
                # First line: begin timestamp; second line: end timestamp.
                self.begin_stamps.append(float(fp.readline().strip()))
                self.end_stamps.append(float(fp.readline().strip()))

    self.begin_stamps.sort()
    self.end_stamps.sort()
def test_concat_files(self):
    """concat_files must join the inputs with newline separators."""
    with tempfile.TemporaryDirectory(dir='unittests') as tmpdir:
        with os_ext.change_dir(tmpdir):
            sources = ['in1.txt', 'in2.txt']
            for fname, text in zip(sources, ('Hello1', 'Hello2')):
                with open(fname, 'w') as fp:
                    fp.write(text)

            target = 'out.txt'
            os_ext.concat_files(target, *sources, overwrite=True)
            with open(target) as fp:
                assert fp.read() == 'Hello1\nHello2\n'
def _poll_job(self, job):
    # Poll a PBS job for completion; no-op for an unknown job.
    if job is None:
        return

    with os_ext.change_dir(job.workdir):
        # PBS writes stdout/stderr back only after the job ends, so their
        # presence signals (possible) completion.
        output_ready = (os.path.exists(job.stdout) and
                        os.path.exists(job.stderr))
        done = job.cancelled or output_ready
        if done:
            t_now = time.time()
            if job.completion_time is None:
                # First observation of the finished job; record it so we
                # can enforce the write-back grace period below.
                job._completion_time = t_now

            time_from_finish = t_now - job.completion_time

            # Declare the job completed only after a grace period, since
            # PBS may still be writing back the output files.
            if time_from_finish > PBS_OUTPUT_WRITEBACK_WAIT:
                job._completed = True
def run(self):
    """The run phase of the regression test pipeline.

    This call is non-blocking. It simply submits the job associated with
    this test and returns.

    :raises reframe.core.exceptions.PipelineError: If no system/partition
        is set or if the job script cannot be prepared.
    """
    if not self._current_system or not self._current_partition:
        raise PipelineError('no system or system partition is set')

    with os_ext.change_dir(self._stagedir):
        try:
            # Generate the job script as a login shell script.
            self._job.prepare(BashScriptBuilder(login=True))
        except OSError as e:
            raise PipelineError('failed to prepare job') from e

        self._job.submit()

    msg = ('spawned job (%s=%s)' %
           ('pid' if self.is_local() else 'jobid', self._job.jobid))
    self.logger.debug(msg)
def check_performance(self):
    """The performance checking phase of the regression test pipeline.

    :raises reframe.core.exceptions.SanityError: If the performance check
        fails.
    """
    # No performance patterns means nothing to check for this test.
    if self.perf_patterns is None:
        return

    with os_ext.change_dir(self._stagedir):
        for tag, expr in self.perf_patterns.items():
            value = evaluate(expr)
            key = '%s:%s' % (self._current_partition.fullname, tag)
            try:
                # Keep the try body minimal: only the reference lookup
                # should be guarded.  Previously the logger call was also
                # inside this block, so an unexpected KeyError from it
                # would have been misreported as a missing reference.
                ref, low_thres, high_thres = self.reference[key]
            except KeyError:
                raise SanityError(
                    "tag `%s' not resolved in references for `%s'" %
                    (tag, self._current_partition.fullname))

            self._perf_logger.info('value: %s, reference: %s' %
                                   (value, self.reference[key]))
            evaluate(assert_reference(value, ref, low_thres, high_thres))
def check_performance(self):
    """The performance checking phase of the regression test pipeline.

    :raises reframe.core.exceptions.SanityError: If the performance check
        fails.
    """
    # No performance patterns means nothing to check for this test.
    if self.perf_patterns is None:
        return

    with os_ext.change_dir(self._stagedir):
        # We first evaluate and log all performance values and then we
        # check them against the reference. This way we always log them
        # even if they don't meet the reference.
        for tag, expr in self.perf_patterns.items():
            value = evaluate(expr)
            key = '%s:%s' % (self._current_partition.fullname, tag)
            if key not in self.reference:
                raise SanityError(
                    "tag `%s' not resolved in references for `%s'" %
                    (tag, self._current_partition.fullname))

            self._perfvalues[key] = (tag, value, *self.reference[key])
            self._perf_logger.log_performance(logging.INFO, tag, value,
                                              *self.reference[key])

        for values in self._perfvalues.values():
            # Reference tuples may carry extra trailing fields (e.g. a
            # unit); only the first three are used for the check.
            tag, val, ref, low_thres, high_thres, *_ = values
            try:
                evaluate(
                    assert_reference(
                        val, ref, low_thres, high_thres,
                        msg=('failed to meet reference: %s={0}, '
                             'expected {1} (l={2}, u={3})' % tag),
                    ))
            except SanityError as e:
                # Re-raise failures as performance-specific errors.
                raise PerformanceError(e)
def compile(self):
    """The compilation phase of the regression test pipeline.

    :raises reframe.core.exceptions.ReframeError: In case of errors.
    """
    if not self._current_environ:
        raise PipelineError('no programming environment set')

    # Copy the check's resources to the stage directory
    if self.sourcesdir:
        try:
            commonpath = os.path.commonpath([self.sourcesdir,
                                             self.sourcepath])
        except ValueError:
            # The two paths cannot be compared (e.g. mixed
            # absolute/relative); treat them as unrelated.
            commonpath = None

        if commonpath:
            self.logger.warn(
                "sourcepath `%s' seems to be a subdirectory of "
                "sourcesdir `%s', but it will be interpreted "
                "as relative to it." % (self.sourcepath, self.sourcesdir))

        if os_ext.is_url(self.sourcesdir):
            self._clone_to_stagedir(self.sourcesdir)
        else:
            self._copy_to_stagedir(os.path.join(self._prefix,
                                                self.sourcesdir))

    # Verify the sourcepath and determine the sourcepath in the stagedir
    if (os.path.isabs(self.sourcepath) or
        os.path.normpath(self.sourcepath).startswith('..')):
        raise PipelineError(
            'self.sourcepath is an absolute path or does not point to a '
            'subfolder or a file contained in self.sourcesdir: ' +
            self.sourcepath)

    staged_sourcepath = os.path.join(self._stagedir, self.sourcepath)
    self.logger.debug('Staged sourcepath: %s' % staged_sourcepath)
    if os.path.isdir(staged_sourcepath):
        if not self.build_system:
            # Try to guess the build system from well-known project files.
            cmakelists = os.path.join(staged_sourcepath, 'CMakeLists.txt')
            configure_ac = os.path.join(staged_sourcepath, 'configure.ac')
            configure_in = os.path.join(staged_sourcepath, 'configure.in')
            if os.path.exists(cmakelists):
                self.build_system = 'CMake'
                self.build_system.builddir = 'rfm_build'
            elif (os.path.exists(configure_ac) or
                  os.path.exists(configure_in)):
                self.build_system = 'Autotools'
                self.build_system.builddir = 'rfm_build'
            else:
                self.build_system = 'Make'

        self.build_system.srcdir = self.sourcepath
    else:
        if not self.build_system:
            self.build_system = 'SingleSource'

        self.build_system.srcfile = self.sourcepath
        self.build_system.executable = self.executable

    # Prepare build job
    build_commands = [
        *self.prebuild_cmd,
        *self.build_system.emit_build_commands(self._current_environ),
        *self.postbuild_cmd
    ]
    # Environments are emitted in order: partition-local environment,
    # programming environment, then any user-defined environment.
    environs = [self._current_partition.local_env, self._current_environ,
                self._user_environ]
    # The build always runs through the local scheduler/launcher,
    # regardless of where the test itself will run.
    self._build_job = getscheduler('local')(
        name='rfm_%s_build' % self.name,
        launcher=getlauncher('local')(),
        workdir=self._stagedir)

    with os_ext.change_dir(self._stagedir):
        try:
            self._build_job.prepare(build_commands, environs,
                                    login=True, trap_errors=True)
        except OSError as e:
            raise PipelineError('failed to prepare build job') from e

        self._build_job.submit()
def test_change_dir_working(self):
    """change_dir must switch to the target directory and restore the
    original one on exit."""
    with os_ext.change_dir(self.temp_dir):
        # Fix: the original `assert os.getcwd(), self.temp_dir` only
        # asserted truthiness, silently using temp_dir as the message.
        assert os.getcwd() == self.temp_dir

    assert os.getcwd() == self.wd_save
def test_change_dir_working(tmpdir):
    """change_dir must switch to the target directory and restore the
    original one on exit."""
    origin = os.getcwd()
    with os_ext.change_dir(tmpdir):
        assert os.getcwd() == tmpdir

    assert os.getcwd() == origin
def compile(self, **compile_opts):
    """The compilation phase of the regression test pipeline.

    :arg compile_opts: Extra options to be passed to the programming
        environment for compiling the source code of the test.
    :raises reframe.core.exceptions.ReframeError: In case of errors.
    """
    if not self._current_environ:
        raise PipelineError('no programming environment set')

    # Copy the check's resources to the stage directory
    if self.sourcesdir:
        try:
            commonpath = os.path.commonpath([self.sourcesdir,
                                             self.sourcepath])
        except ValueError:
            # The two paths cannot be compared (e.g. mixed
            # absolute/relative); treat them as unrelated.
            commonpath = None

        if commonpath:
            self.logger.warn(
                "sourcepath (`%s') seems to be a subdirectory of "
                "sourcesdir (`%s'), but it will be interpreted "
                "as relative to it." % (self.sourcepath, self.sourcesdir))

        if os_ext.is_url(self.sourcesdir):
            self._clone_to_stagedir(self.sourcesdir)
        else:
            self._copy_to_stagedir(os.path.join(self._prefix,
                                                self.sourcesdir))

    # Verify the sourcepath and determine the sourcepath in the stagedir
    if (os.path.isabs(self.sourcepath) or
        os.path.normpath(self.sourcepath).startswith('..')):
        raise PipelineError(
            'self.sourcepath is an absolute path or does not point to a '
            'subfolder or a file contained in self.sourcesdir: ' +
            self.sourcepath)

    staged_sourcepath = os.path.join(self._stagedir, self.sourcepath)
    self.logger.debug('Staged sourcepath: %s' % staged_sourcepath)

    # Remove source and executable from compile_opts
    compile_opts.pop('source', None)
    compile_opts.pop('executable', None)

    # Change working dir to stagedir although absolute paths are used
    # everywhere in the compilation process. This is done to ensure that
    # any other files (besides the executable) generated during the
    # compilation will remain in the stage directory.
    with os_ext.change_dir(self._stagedir):
        self.prebuild()
        # Make the (staged) source directory visible to the compiler's
        # include search path.
        if os.path.isdir(staged_sourcepath):
            includedir = staged_sourcepath
        else:
            includedir = os.path.dirname(staged_sourcepath)

        self._current_environ.include_search_path.append(includedir)
        self._compile_task = self._current_environ.compile(
            sourcepath=staged_sourcepath,
            executable=os.path.join(self._stagedir, self.executable),
            **compile_opts)
        self.logger.debug('compilation stdout:\n%s' %
                          self._compile_task.stdout)
        self.logger.debug('compilation stderr:\n%s' %
                          self._compile_task.stderr)
        self.postbuild()
        self.logger.debug('compilation finished')
def test_load_directory_relative(self):
    """Import a package through a relative directory path."""
    with os_ext.change_dir('reframe'):
        mod = util.import_module_from_file('../reframe')
        assert mod.VERSION == reframe.VERSION
        assert mod.__name__ == 'reframe'
        assert sys.modules.get('reframe') is mod
def check_performance(self):
    """The performance checking phase of the regression test pipeline.

    :raises reframe.core.exceptions.SanityError: If the performance check
        fails.
    """
    # No performance patterns means nothing to check for this test.
    if self.perf_patterns is None:
        return

    with os_ext.change_dir(self._stagedir):
        # Check if default reference perf values are provided and
        # store all the variables tested in the performance check
        has_default = False
        variables = set()
        for key, ref in self.reference.items():
            keyparts = key.split(self.reference.scope_separator)
            system = keyparts[0]
            varname = keyparts[-1]
            try:
                # The optional fourth reference field is the unit.
                unit = ref[3]
            except IndexError:
                unit = None

            variables.add((varname, unit))
            if system == '*':
                has_default = True
                break

        if not has_default:
            if not variables:
                # If empty, it means that self.reference was empty, so try
                # to infer their name from perf_patterns
                variables = {(name, None)
                             for name in self.perf_patterns.keys()}

            # Register a catch-all (zero value, no thresholds) reference
            # for every variable, so that the lookup below never fails.
            for var in variables:
                name, unit = var
                ref_tuple = (0, None, None)
                if unit:
                    ref_tuple += (unit, )

                self.reference.update({'*': {name: ref_tuple}})

        # We first evaluate and log all performance values and then we
        # check them against the reference. This way we always log them
        # even if they don't meet the reference.
        for tag, expr in self.perf_patterns.items():
            value = evaluate(expr)
            key = '%s:%s' % (self._current_partition.fullname, tag)
            if key not in self.reference:
                raise SanityError(
                    "tag `%s' not resolved in references for `%s'" %
                    (tag, self._current_partition.fullname))

            self._perfvalues[key] = (value, *self.reference[key])
            self._perf_logger.log_performance(logging.INFO, tag, value,
                                              *self.reference[key])

        for key, values in self._perfvalues.items():
            # Reference tuples may carry extra trailing fields (e.g. a
            # unit); only the first three are used for the check.
            val, ref, low_thres, high_thres, *_ = values

            # The tag is the last component of the reference key.
            tag = key.split(':')[-1]
            try:
                evaluate(
                    assert_reference(
                        val, ref, low_thres, high_thres,
                        msg=('failed to meet reference: %s={0}, '
                             'expected {1} (l={2}, u={3})' % tag)))
            except SanityError as e:
                # Re-raise failures as performance-specific errors.
                raise PerformanceError(e)
def test_load_directory_relative(self):
    """Import a package through a relative directory path."""
    with os_ext.change_dir('reframe'):
        mod = util.import_module_from_file('../reframe')
        self.assertEqual(reframe.VERSION, mod.VERSION)
        self.assertEqual('reframe', mod.__name__)
        self.assertIs(mod, sys.modules.get('reframe'))
def test_change_dir_working(self):
    """change_dir must switch to the target directory and restore the
    original one on exit."""
    with os_ext.change_dir(self.temp_dir):
        # Fix: `assertTrue(os.getcwd(), self.temp_dir)` only checked
        # truthiness, with temp_dir acting as the failure message; an
        # equality check was clearly intended.
        self.assertEqual(os.getcwd(), self.temp_dir)

    self.assertEqual(os.getcwd(), self.wd_save)