def verify_job_external(self, input_file, args, verbose=1):
    '''Run user-supplied verifier script.

    Assume function is executed in self.path.

    Returns a (validation.Status, output) tuple, where the status holds
    a single pass/fail entry derived from the verifier's exit code and
    output is the verifier's decoded stdout (suppressed unless
    verbose > 1).

    Raises exceptions.AnalysisError if the verifier could not be run.
    '''
    verify_cmd, = self.test_program.extract_cmd(self.path, input_file, args)
    try:
        if verbose > 2:
            print('Analysing test using %s in %s.' %
                    (verify_cmd, self.path))
        verify_popen = subprocess.Popen(verify_cmd, shell=True,
                stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        # communicate() drains the pipes while waiting for the child to
        # exit.  (Calling wait() first can deadlock if the child fills a
        # PIPE buffer -- see the subprocess documentation.)
        output = verify_popen.communicate()[0].decode('utf-8')
    except OSError:
        # slightly odd syntax in order to be compatible with python 2.5
        # and python 2.6/3
        err = 'Analysis of test failed: %s' % (sys.exc_info()[1], )
        raise exceptions.AnalysisError(err)
    if verbose < 2:
        # Suppress output.  (hackhack)
        output = ''
    # Exit code 0 from the verifier means the test passed.
    if verify_popen.returncode == 0:
        return (validation.Status([True]), output)
    else:
        return (validation.Status([False]), output)
def _skip_job(self, input_file, args, verbose=1):
    '''Run user-supplied command to check if test should be skipped.

    IMPORTANT: use self.skip_job rather than self._skip_job if using
    multiple threads.

    Decorated to skip_job, which acquires directory lock and enters
    self.path first, during initialisation.

    Returns a (validation.Status, '') tuple; the status is 'skipped'
    when the skip command exits with code 0, otherwise a default Status.
    Failure to launch the skip command is reported (at high verbosity)
    but does not mark the test as skipped.
    '''
    status = validation.Status()
    if self.test_program.skip_program:
        cmd = self.test_program.skip_cmd(input_file, args)
        try:
            if verbose > 2:
                print('Testing whether to skip test using %s in %s.' %
                        (cmd, self.path))
            skip_popen = subprocess.Popen(cmd, shell=True,
                    stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            # communicate() drains the pipes while waiting, avoiding the
            # deadlock wait() can cause if the child fills a PIPE buffer.
            skip_popen.communicate()
            if skip_popen.returncode == 0:
                # skip this test
                status = validation.Status(name='skipped')
        except OSError:
            # slightly odd syntax in order to be compatible with python
            # 2.5 and python 2.6/3
            if verbose > 2:
                print('Test to skip test: %s' % (sys.exc_info()[1], ))
    return (status, '')
def _update_status(self, status, inp_arg): '''Update self.status with success of a test.''' if status: self.status[inp_arg] = status else: # Something went wrong. Store a Status failed object. self.status[inp_arg] = validation.Status([False])
def _verify_job(self, input_file, args, verbose=1, rundir=None):
    '''Check job against benchmark.

    Assume function is executed in self.path.

    IMPORTANT: use self.verify_job rather than self._verify_job if using
    multiple threads.

    Decorated to verify_job, which acquires directory lock and enters
    self.path first, during initialisation.

    Returns a (validation.Status, msg) tuple, where msg is always a
    string describing the outcome (possibly including data tables at
    high verbosity).
    '''
    # We already have DIR_LOCK, so use _skip_job instead of skip_job.
    (status, msg) = self._skip_job(input_file, args, verbose)
    try:
        if self.test_program.verify and not status.skipped():
            # User supplied an external verification script: defer to it.
            (status, msg) = self.verify_job_external(input_file, args,
                                                     verbose)
        elif not status.skipped():
            # Compare extracted data against the benchmark values.
            (bench_out, test_out) = self.extract_data(input_file, args,
                                                      verbose)
            (comparable, status, msg) = validation.compare_data(bench_out,
                    test_out, self.default_tolerance, self.tolerances,
                    self.test_program.ignore_fields)
            if verbose > 2:
                # Include data tables in output.
                if comparable:
                    # Combine test and benchmark dictionaries.
                    data_table = util.pretty_print_table(
                            ['benchmark', 'test'], [bench_out, test_out])
                else:
                    # Print dictionaries separately--couldn't even compare
                    # them!
                    data_table = '\n'.join(
                            (util.pretty_print_table(['benchmark'],
                                                     [bench_out]),
                             util.pretty_print_table(['test '],
                                                     [test_out])))
                if msg.strip():
                    # join data table with error message from
                    # validation.compare_data.
                    msg = '\n'.join((msg, data_table))
                else:
                    msg = data_table
    except (exceptions.AnalysisError, exceptions.TestCodeError):
        if msg.strip():
            msg = '%s\n%s' % (msg, sys.exc_info()[1])
        else:
            # Convert the exception to str so msg is a string on every
            # code path (the original stored the exception object here).
            msg = str(sys.exc_info()[1])
        status = validation.Status([False])
    self._update_status(status, (input_file, args))
    if verbose > 0 and verbose < 3:
        info_line = util.info_line(self.path, input_file, args, rundir)
        sys.stdout.write(info_line)
    status.print_status(msg, verbose)
    return (status, msg)
def skip_job(self, input_file, args, verbose=1):
    '''Run user-supplied command to check if test should be skipped.

    Assume function is executed in self.path.

    Returns a (validation.Status, '') tuple; the status is 'skipped'
    when the skip command exits with code 0, otherwise a default Status.

    Raises exceptions.AnalysisError if the skip command could not be
    run.
    '''
    status = validation.Status()
    if self.test_program.skip_program:
        cmd = self.test_program.skip_cmd(input_file, args)
        try:
            if verbose > 2:
                print('Testing whether to skip test using %s in %s.' %
                        (cmd, self.path))
            skip_popen = subprocess.Popen(cmd, shell=True,
                    stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            # communicate() drains the pipes while waiting, avoiding the
            # deadlock wait() can cause if the child fills a PIPE buffer.
            skip_popen.communicate()
        except OSError:
            # slightly odd syntax in order to be compatible with python
            # 2.5 and python 2.6/3
            err = 'Test to skip test: %s' % (sys.exc_info()[1], )
            raise exceptions.AnalysisError(err)
        if skip_popen.returncode == 0:
            # skip this test
            status = validation.Status(name='skipped')
    return (status, '')
def run_test(self, verbose=1, cluster_queue=None, rundir=None):
    '''Run all jobs in test.

    Builds the command and output filename for each (input, args) pair,
    runs the jobs (locally one-at-a-time, or all at once via a single
    submission to cluster_queue), then verifies each job against the
    benchmark.  On a RunError the failing job is marked failed and all
    remaining jobs are marked skipped.
    '''
    try:
        # Construct tests.
        test_cmds = []
        test_files = []
        for (test_input, test_arg) in self.inputs_args:
            if (test_input and not os.path.exists(
                    os.path.join(self.path, test_input))):
                err = 'Input file does not exist: %s' % (test_input, )
                raise exceptions.RunError(err)
            test_cmds.append(self.test_program.run_cmd(test_input,
                    test_arg, self.nprocs))
            test_files.append(util.testcode_filename(FILESTEM['test'],
                    self.test_program.test_id, test_input, test_arg))

        # Move files matching output pattern out of the way.
        self.move_old_output_files(verbose)

        # Run tests one-at-a-time locally or submit job in single submit
        # file to a queueing system.
        if cluster_queue:
            if self.output:
                for (ind, test) in enumerate(test_cmds):
                    # Don't quote self.output if it contains any wildcards
                    # (assume the user set it up correctly!)
                    out = self.output
                    if not compat.compat_any(
                            wild in self.output
                            for wild in ['*', '?', '[', '{']):
                        out = pipes.quote(self.output)
                    # Each queued command also renames its raw output file
                    # to the per-job test filename.
                    test_cmds[ind] = '%s; mv %s %s' % (test_cmds[ind],
                            out, pipes.quote(test_files[ind]))
            # One combined submission: all jobs run in a single script.
            test_cmds = ['\n'.join(test_cmds)]
        for (ind, test) in enumerate(test_cmds):
            job = self.start_job(test, cluster_queue, verbose)
            job.wait()
            # Analyse tests as they finish.
            if cluster_queue:
                # Did all of them at once.
                for (test_input, test_arg) in self.inputs_args:
                    self.verify_job(test_input, test_arg, verbose, rundir)
            else:
                # Did one job at a time.
                (test_input, test_arg) = self.inputs_args[ind]
                err = []
                if self.output:
                    try:
                        self.move_output_to_test_output(test_files[ind])
                    except exceptions.RunError:
                        # Defer: we still want to check the return code
                        # and skip status before raising.
                        err.append(sys.exc_info()[1])
                status = validation.Status()
                if job.returncode != 0:
                    # Non-zero exit code takes priority over any error
                    # from moving the output file.
                    err.insert(0, 'Error running job. \nReturn code: %i'
                            % job.returncode)
                (status, msg) = self.skip_job(test_input, test_arg,
                                              verbose)
                if status.skipped():
                    self._update_status(status, (test_input, test_arg))
                    if verbose > 0 and verbose < 3:
                        sys.stdout.write(util.info_line(self.path,
                                test_input, test_arg, rundir))
                    status.print_status(msg, verbose)
                elif err:
                    # re-raise first error we hit.
                    raise exceptions.RunError(err[0])
                else:
                    self.verify_job(test_input, test_arg, verbose, rundir)
    except exceptions.RunError:
        err = sys.exc_info()[1]
        if verbose > 2:
            err = 'Test(s) in %s failed.\n%s' % (self.path, err)
        # Mark the job that failed to run as a failed test.
        status = validation.Status([False])
        self._update_status(status, (test_input, test_arg))
        if verbose > 0 and verbose < 3:
            info_line = util.info_line(self.path, test_input, test_arg,
                                       rundir)
            sys.stdout.write(info_line)
        status.print_status(err, verbose)
        # Shouldn't run remaining tests after such a catastrophic failure.
        # Mark all remaining tests as skipped so the user knows that they
        # weren't run.
        err = 'Previous test in %s caused a system failure.' % (self.path)
        status = validation.Status(name='skipped')
        for ((test_input, test_arg), stat) in self.status.items():
            # Only entries with no recorded status (falsy) were not run.
            if not self.status[(test_input, test_arg)]:
                self._update_status(status, (test_input, test_arg))
                if verbose > 2:
                    cmd = self.test_program.run_cmd(test_input, test_arg,
                                                    self.nprocs)
                    print('Test using %s in %s' % (cmd, self.path))
                elif verbose > 0:
                    info_line = util.info_line(self.path, test_input,
                                               test_arg, rundir)
                    sys.stdout.write(info_line)
                status.print_status(err, verbose)