def readTestFunctions(self):
    """For each TestFile we are running tests in, load the list of test
    functions in that file (results are recorded on the TestFile objects
    and in self.compiled_files / self.uncompiled_files)."""
    # XXX TODO - error handling if a test is specified that does not exist
    sys.stderr.write("Finding tests...\n")

    def make_job(test_file):
        # Factory so each closure binds its own test_file, not the loop variable.
        def get_tests():
            def callback(rv, signal, proc, killed):
                test_file.updateTestLists(rv)
                # Sort the file into the compiled/uncompiled bucket for runTests.
                if test_file.compile_failed:
                    self.uncompiled_files.append(test_file)
                else:
                    self.compiled_files.append(test_file)
            # Job triple expected by JobRunner: (command, callback, timeout multiplier)
            return test_file.getTests(self.engine), callback, None
        return get_tests

    test_finder = JobRunner(self.num_processes, timeout=30)
    for path, test_file in self.tests.iteritems():
        self.all_files.append(relpath(path, test_path))
        test_finder.queue([make_job(test_file)])
    test_finder.run()
def runTests(self):
    """Run every test in the compiled files on the engine.

    Returns a tuple (tests_by_id, results) where tests_by_id maps test id
    to Test object and results is a regression.Results summarising the
    passed / failed / valgrind / crashed / timed-out sets.
    """
    all_tests = TestSet()
    passed = TestSet()
    failed = TestSet()
    valgrind = TestSet()
    crashed = TestSet()
    timed_out = TestSet()

    def get_job(test):
        # One job per individual test (normal, non-fast mode).
        def run_job():
            def callback(rv, signal, proc, killed):
                test.setStatus(rv, signal, proc, killed)
                # This is cleanup code that should perhaps be elsewhere
                if test.testrunner_fn is not None:
                    os.unlink(test.testrunner_fn)
                if test.test_code_fn is not None:
                    os.unlink(test.test_code_fn)
                    # Fix: clear after unlinking (mirrors testrunner_fn below)
                    # so a repeated callback cannot attempt a second unlink.
                    test.test_code_fn = None
                test.testrunner_fn = None
            # NOTE(review): 'timeout_multipler' looks like a typo for
            # 'timeout_multiplier', but the attribute lives on the Test
            # class outside this view -- confirm there before renaming.
            return test.run(
                self.engine, opts.engine_args), callback, test.timeout_multipler
        return run_job

    def get_job_fast(test_file):
        # One job per whole file (fast mode runs a file's tests in one process).
        def run_job():
            def callback(rv, signal, proc, killed):
                test_file.setTestsStatus(rv, signal, proc, killed)
                # This is cleanup code that should perhaps be elsewhere
                if test_file.testrunner_fn is not None:
                    os.unlink(test_file.testrunner_fn)
                test_file.testrunner_fn = None
            return test_file.runFile(self.engine, opts.engine_args), callback, None
        return run_job

    sys.stderr.write("Running tests...\n")
    # Valgrind runs are dramatically slower, hence the inflated timeout.
    if opts.fast:
        timeout = 10
    elif opts.valgrind:
        timeout = 30 * 50
    else:
        timeout = 30
    test_runner = JobRunner(self.num_processes, timeout=timeout)

    if opts.fast:
        for test_file in self.compiled_files:
            test_names = self.test_cases[test_file.path]
            test_file.setTests(test_names)
            test_runner.queue([get_job_fast(test_file)])
    else:
        for test_file in self.compiled_files:
            test_names = self.test_cases[test_file.path]
            test_runner.queue(
                [get_job(item) for item in test_file.setTests(test_names)])
    test_runner.run()

    # Aggregate per-file outcome sets; in fast mode a crashed file's
    # individual results are unusable, so record the file and skip it.
    crashed_files = []
    for test_file in self.compiled_files:
        if opts.fast and test_file.crashed:
            crashed_files.append(test_file)
            continue
        all_tests |= test_file.testsRun()
        passed |= test_file.testsPassed()
        failed |= test_file.testsFailed()
        valgrind |= test_file.testsValgrind()
        crashed |= test_file.testsCrashed()
        timed_out |= test_file.testsTimedOut()

    results = regression.Results(
        getEngineName(self.engine), passed, failed, valgrind, crashed,
        timed_out, loaded_files=self.all_files,
        compile_failed=[f.relative_path for f in self.uncompiled_files],
        crashed_files=[f.relative_path for f in crashed_files])

    tests_by_id = {}
    for test_file in self.tests.itervalues():
        for test in test_file.itervalues():
            tests_by_id[test.id] = test
    return tests_by_id, results
def runTests(self):
    """Execute the queued tests on the engine and collect the outcome sets.

    Returns (tests_by_id, results): a mapping from test id to Test object,
    and a regression.Results summary of the run.

    NOTE(review): this appears to be a near-identical duplicate of an
    earlier runTests definition; as the later def it is the one Python
    keeps. Confirm and remove the redundant copy.
    """
    all_tests, passed, failed, valgrind, crashed, timed_out = (
        TestSet(), TestSet(), TestSet(), TestSet(), TestSet(), TestSet())

    def make_single_job(test):
        """Job factory for normal mode: one job per Test instance."""
        def run_job():
            def callback(rv, signal, proc, killed):
                test.setStatus(rv, signal, proc, killed)
                # Cleanup that should perhaps live elsewhere.
                if test.testrunner_fn is not None:
                    os.unlink(test.testrunner_fn)
                if test.test_code_fn is not None:
                    os.unlink(test.test_code_fn)
                test.testrunner_fn = None
            return (test.run(self.engine, opts.engine_args),
                    callback,
                    test.timeout_multipler)
        return run_job

    def make_file_job(test_file):
        """Job factory for fast mode: one job per whole TestFile."""
        def run_job():
            def callback(rv, signal, proc, killed):
                test_file.setTestsStatus(rv, signal, proc, killed)
                # Cleanup that should perhaps live elsewhere.
                if test_file.testrunner_fn is not None:
                    os.unlink(test_file.testrunner_fn)
                test_file.testrunner_fn = None
            return (test_file.runFile(self.engine, opts.engine_args),
                    callback,
                    None)
        return run_job

    sys.stderr.write("Running tests...\n")
    timeout = 10 if opts.fast else (30 * 50 if opts.valgrind else 30)
    test_runner = JobRunner(self.num_processes, timeout=timeout)

    for test_file in self.compiled_files:
        test_names = self.test_cases[test_file.path]
        if opts.fast:
            test_file.setTests(test_names)
            test_runner.queue([make_file_job(test_file)])
        else:
            jobs = [make_single_job(t) for t in test_file.setTests(test_names)]
            test_runner.queue(jobs)
    test_runner.run()

    # Fold each file's results into the aggregate sets; fast-mode crashes
    # invalidate the file's per-test results, so track the file instead.
    crashed_files = []
    for test_file in self.compiled_files:
        if opts.fast and test_file.crashed:
            crashed_files.append(test_file)
            continue
        all_tests |= test_file.testsRun()
        passed |= test_file.testsPassed()
        failed |= test_file.testsFailed()
        valgrind |= test_file.testsValgrind()
        crashed |= test_file.testsCrashed()
        timed_out |= test_file.testsTimedOut()

    results = regression.Results(
        getEngineName(self.engine),
        passed, failed, valgrind, crashed, timed_out,
        loaded_files=self.all_files,
        compile_failed=[f.relative_path for f in self.uncompiled_files],
        crashed_files=[f.relative_path for f in crashed_files])

    tests_by_id = dict(
        (test.id, test)
        for test_file in self.tests.itervalues()
        for test in test_file.itervalues())
    return tests_by_id, results